Dataset columns (name: type, observed range):
repo_name: string (length 7 to 71)
file_path: string (length 5 to 118)
context: list
import_statement: string (length 45 to 12.5k)
token_num: int64 (641 to 99.4k)
cropped_code: string (length 44 to 17k)
all_code: string (length 43 to 754k)
next_line: string (length 2 to 330)
gold_snippet_index: int64 (0 to 68)
created_at: string (length 25)
level: string (9 classes)
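Read together, these columns describe a repository-level next-line code-completion benchmark: cropped_code holds the truncated in-file context, context is a list of cross-file snippets (identifier, path, snippet), next_line is the line to predict, and gold_snippet_index appears to point at the context entry whose identifier the target line uses (the first row below is consistent with that reading). A minimal sketch of how such rows could be inspected, assuming the split has been exported to a local JSON Lines file; the file name rows.jsonl and this loading path are illustrative assumptions, not part of the source:

import json

# Hypothetical local export of the split shown above (one JSON object per line).
with open("rows.jsonl", "r", encoding="utf-8") as f:
    for raw in f:
        row = json.loads(raw)
        # "context" is a list of {"identifier", "path", "snippet"} dicts.
        gold = row["context"][row["gold_snippet_index"]]
        print(row["repo_name"], row["file_path"], row["level"], row["created_at"])
        print("gold identifier:", gold["identifier"], "defined in", gold["path"])
        print("prompt tokens  :", row["token_num"])
        print("target line    :", row["next_line"])
        # Sanity check of the reading above: the gold identifier typically occurs in the target line.
        print("identifier in target line:", gold["identifier"] in row["next_line"])
        break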
repo_name: jzmzhong/Automatic-Prosody-Annotator-with-SSWP-CLAP
file_path: src/clap_module/conformer/encoder.py
context:
[ { "identifier": "ConvolutionModule", "path": "src/clap_module/conformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\r\n \"\"\"ConvolutionModule in Conformer model.\r\n\r\n Args:\r\n channels (int): The number of channels of conv layers.\r\n kernel_size (int): Kernerl size of conv layers.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):\r\n \"\"\"Construct an ConvolutionModule object.\r\n \"\"\"\r\n super(ConvolutionModule, self).__init__()\r\n # kernerl_size should be a odd number for 'SAME' padding\r\n assert (kernel_size - 1) % 2 == 0\r\n\r\n self.pointwise_conv1 = nn.Conv1d(\r\n channels,\r\n 2 * channels,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0,\r\n bias=bias,\r\n )\r\n self.depthwise_conv = nn.Conv1d(\r\n channels,\r\n channels,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n groups=channels,\r\n bias=bias,\r\n )\r\n self.norm = nn.BatchNorm1d(channels)\r\n self.pointwise_conv2 = nn.Conv1d(\r\n channels,\r\n channels,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0,\r\n bias=bias,\r\n )\r\n self.activation = activation\r\n\r\n def forward(self, x):\r\n \"\"\"Compute convolution module.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (#batch, time, channels).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time, channels).\r\n\r\n \"\"\"\r\n # exchange the temporal dimension and the feature dimension\r\n x = x.transpose(1, 2)\r\n\r\n # GLU mechanism\r\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\r\n x = nn.functional.glu(x, dim=1) # (batch, channel, dim)\r\n\r\n # 1D Depthwise Conv\r\n x = self.depthwise_conv(x)\r\n x = self.activation(self.norm(x))\r\n\r\n x = self.pointwise_conv2(x)\r\n\r\n return x.transpose(1, 2)\r" }, { "identifier": "EncoderLayer", "path": "src/clap_module/conformer/encoder_layer.py", "snippet": "class EncoderLayer(nn.Module):\r\n \"\"\"Encoder layer module.\r\n\r\n Args:\r\n size (int): Input dimension.\r\n self_attn (torch.nn.Module): Self-attention module instance.\r\n `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance\r\n can be used as the argument.\r\n feed_forward (torch.nn.Module): Feed-forward module instance.\r\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\r\n can be used as the argument.\r\n feed_forward_macaron (torch.nn.Module): Additional feed-forward module instance.\r\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\r\n can be used as the argument.\r\n conv_module (torch.nn.Module): Convolution module instance.\r\n `ConvlutionModule` instance can be used as the argument.\r\n dropout_rate (float): Dropout rate.\r\n normalize_before (bool): Whether to use layer_norm before the first block.\r\n concat_after (bool): Whether to concat attention layer's input and output.\r\n if True, additional linear will be applied.\r\n i.e. x -> x + linear(concat(x, att(x)))\r\n if False, no additional linear will be applied. i.e. 
x -> x + att(x)\r\n stochastic_depth_rate (float): Proability to skip this layer.\r\n During training, the layer may skip residual computation and return input\r\n as-is with given probability.\r\n\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n size,\r\n self_attn,\r\n feed_forward,\r\n feed_forward_macaron,\r\n conv_module,\r\n dropout_rate,\r\n normalize_before=True,\r\n concat_after=False,\r\n stochastic_depth_rate=0.0,\r\n ):\r\n \"\"\"Construct an EncoderLayer object.\"\"\"\r\n super(EncoderLayer, self).__init__()\r\n self.self_attn = self_attn\r\n self.feed_forward = feed_forward\r\n self.feed_forward_macaron = feed_forward_macaron\r\n self.conv_module = conv_module\r\n self.norm_ff = LayerNorm(size) # for the FNN module\r\n self.norm_mha = LayerNorm(size) # for the MHA module\r\n if feed_forward_macaron is not None:\r\n self.norm_ff_macaron = LayerNorm(size)\r\n self.ff_scale = 0.5\r\n else:\r\n self.ff_scale = 1.0\r\n if self.conv_module is not None:\r\n self.norm_conv = LayerNorm(size) # for the CNN module\r\n self.norm_final = LayerNorm(size) # for the final output of the block\r\n self.dropout = nn.Dropout(dropout_rate)\r\n self.size = size\r\n self.normalize_before = normalize_before\r\n self.concat_after = concat_after\r\n if self.concat_after:\r\n self.concat_linear = nn.Linear(size + size, size)\r\n self.stochastic_depth_rate = stochastic_depth_rate\r\n\r\n def forward(self, x_input, mask, cache=None):\r\n \"\"\"Compute encoded features.\r\n\r\n Args:\r\n x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb.\r\n - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)].\r\n - w/o pos emb: Tensor (#batch, time, size).\r\n mask (torch.Tensor): Mask tensor for the input (#batch, 1, time).\r\n cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time, size).\r\n torch.Tensor: Mask tensor (#batch, 1, time).\r\n\r\n \"\"\"\r\n if isinstance(x_input, tuple):\r\n x, pos_emb = x_input[0], x_input[1]\r\n else:\r\n x, pos_emb = x_input, None\r\n\r\n skip_layer = False\r\n # with stochastic depth, residual connection `x + f(x)` becomes\r\n # `x <- x + 1 / (1 - p) * f(x)` at training time.\r\n stoch_layer_coeff = 1.0\r\n if self.training and self.stochastic_depth_rate > 0:\r\n skip_layer = torch.rand(1).item() < self.stochastic_depth_rate\r\n stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate)\r\n\r\n if skip_layer:\r\n if cache is not None:\r\n x = torch.cat([cache, x], dim=1)\r\n if pos_emb is not None:\r\n return (x, pos_emb), mask\r\n return x, mask\r\n\r\n # whether to use macaron style\r\n if self.feed_forward_macaron is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_ff_macaron(x)\r\n x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(\r\n self.feed_forward_macaron(x)\r\n )\r\n if not self.normalize_before:\r\n x = self.norm_ff_macaron(x)\r\n\r\n # convolution module\r\n \"\"\"\r\n if self.conv_module is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_conv(x)\r\n x = residual + stoch_layer_coeff * self.dropout(self.conv_module(x))\r\n if not self.normalize_before:\r\n x = self.norm_conv(x)\r\n \"\"\"\r\n\r\n # multi-headed self-attention module\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_mha(x)\r\n\r\n if cache is None:\r\n x_q = x\r\n else:\r\n assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)\r\n x_q = x[:, -1:, :]\r\n residual = residual[:, -1:, :]\r\n mask = None if 
mask is None else mask[:, -1:, :]\r\n\r\n if pos_emb is not None:\r\n x_att = self.self_attn(x_q, x, x, pos_emb, mask)\r\n else:\r\n x_att = self.self_attn(x_q, x, x, mask)\r\n\r\n if self.concat_after:\r\n x_concat = torch.cat((x, x_att), dim=-1)\r\n x = residual + stoch_layer_coeff * self.concat_linear(x_concat)\r\n else:\r\n x = residual + stoch_layer_coeff * self.dropout(x_att)\r\n if not self.normalize_before:\r\n x = self.norm_mha(x)\r\n\r\n # convolution module\r\n if self.conv_module is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_conv(x)\r\n x = residual + stoch_layer_coeff * self.dropout(self.conv_module(x))\r\n if not self.normalize_before:\r\n x = self.norm_conv(x)\r\n\r\n # feed forward module\r\n if self.feed_forward:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_ff(x)\r\n x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(\r\n self.feed_forward(x)\r\n )\r\n if not self.normalize_before:\r\n x = self.norm_ff(x)\r\n else:\r\n raise ValueError(\"not exit\")\r\n\r\n if self.conv_module is not None:\r\n x = self.norm_final(x)\r\n\r\n if cache is not None:\r\n x = torch.cat([cache, x], dim=1)\r\n\r\n if pos_emb is not None:\r\n return (x, pos_emb), mask\r\n\r\n return x, mask\r" }, { "identifier": "get_activation", "path": "src/clap_module/conformer/modules.py", "snippet": "def get_activation(act):\r\n \"\"\"Return activation function.\r\n \"\"\"\r\n # Lazy load to avoid unused import\r\n\r\n activation_funcs = {\r\n \"hardtanh\": torch.nn.Hardtanh,\r\n \"tanh\": torch.nn.Tanh,\r\n \"relu\": torch.nn.ReLU,\r\n \"selu\": torch.nn.SELU,\r\n \"swish\": Swish,\r\n }\r\n\r\n return activation_funcs[act]()\r" }, { "identifier": "VGG2L", "path": "src/clap_module/conformer/modules.py", "snippet": "class VGG2L(torch.nn.Module):\r\n \"\"\"VGG2L module for custom encoder.\r\n\r\n Args:\r\n idim: Input dimension.\r\n odim: Output dimension.\r\n pos_enc: Positional encoding class.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim: int, odim: int, pos_enc: torch.nn.Module = None):\r\n \"\"\"Construct a VGG2L object.\"\"\"\r\n super().__init__()\r\n\r\n self.vgg2l = torch.nn.Sequential(\r\n torch.nn.Conv2d(1, 64, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(64, 64, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.MaxPool2d((3, 2)),\r\n torch.nn.Conv2d(64, 128, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(128, 128, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.MaxPool2d((2, 2)),\r\n )\r\n\r\n if pos_enc is not None:\r\n self.output = torch.nn.Sequential(\r\n torch.nn.Linear(128 * ((idim // 2) // 2), odim), pos_enc\r\n )\r\n else:\r\n self.output = torch.nn.Linear(128 * ((idim // 2) // 2), odim)\r\n\r\n def forward(\r\n self, feats: torch.Tensor, feats_mask: torch.Tensor\r\n ) -> Union[\r\n Tuple[torch.Tensor, torch.Tensor],\r\n Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor],\r\n ]:\r\n \"\"\"Forward VGG2L bottleneck.\r\n\r\n Args:\r\n feats: Feature sequences. (B, F, D_feats)\r\n feats_mask: Mask of feature sequences. (B, 1, F)\r\n\r\n Returns:\r\n vgg_output: VGG output sequences.\r\n (B, sub(F), D_out) or ((B, sub(F), D_out), (B, sub(F), D_att))\r\n vgg_mask: Mask of VGG output sequences. 
(B, 1, sub(F))\r\n\r\n \"\"\"\r\n feats = feats.unsqueeze(1)\r\n vgg_output = self.vgg2l(feats)\r\n\r\n b, c, t, f = vgg_output.size()\r\n\r\n vgg_output = self.output(\r\n vgg_output.transpose(1, 2).contiguous().view(b, t, c * f)\r\n )\r\n\r\n if feats_mask is not None:\r\n vgg_mask = self.create_new_mask(feats_mask)\r\n else:\r\n vgg_mask = feats_mask\r\n\r\n return vgg_output, vgg_mask\r\n\r\n def create_new_mask(self, feats_mask: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Create a subsampled mask of feature sequences.\r\n\r\n Args:\r\n feats_mask: Mask of feature sequences. (B, 1, F)\r\n\r\n Returns:\r\n vgg_mask: Mask of VGG2L output sequences. (B, 1, sub(F))\r\n\r\n \"\"\"\r\n vgg1_t_len = feats_mask.size(2) - (feats_mask.size(2) % 3)\r\n vgg_mask = feats_mask[:, :, :vgg1_t_len][:, :, ::3]\r\n\r\n vgg2_t_len = vgg_mask.size(2) - (vgg_mask.size(2) % 2)\r\n vgg_mask = vgg_mask[:, :, :vgg2_t_len][:, :, ::2]\r\n\r\n return vgg_mask\r" }, { "identifier": "LegacyRelPositionMultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class LegacyRelPositionMultiHeadedAttention(MultiHeadedAttention):\r\n \"\"\"Multi-Head Attention layer with relative position encoding (old version).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n Paper: https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\r\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\r\n super().__init__(n_head, n_feat, dropout_rate)\r\n self.zero_triu = zero_triu\r\n # linear transformation for positional encoding\r\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\r\n # these two learnable bias are used in matrix c and matrix d\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\r\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\r\n\r\n def rel_shift(self, x):\r\n \"\"\"Compute relative positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, head, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor.\r\n\r\n \"\"\"\r\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\r\n x_padded = torch.cat([zero_pad, x], dim=-1)\r\n\r\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\r\n x = x_padded[:, :, 1:].view_as(x)\r\n\r\n if self.zero_triu:\r\n ones = torch.ones((x.size(2), x.size(3)))\r\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\r\n\r\n return x\r\n\r\n def forward(self, query, key, value, pos_emb, mask):\r\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n pos_emb (torch.Tensor): Positional embedding tensor (#batch, time1, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\r\n\r\n n_batch_pos = pos_emb.size(0)\r\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\r\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\r\n\r\n # (batch, head, time1, d_k)\r\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\r\n # (batch, head, time1, d_k)\r\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\r\n\r\n # compute attention score\r\n # first compute matrix a and matrix c\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n # (batch, head, time1, time2)\r\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\r\n\r\n # compute matrix b and matrix d\r\n # (batch, head, time1, time1)\r\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\r\n matrix_bd = self.rel_shift(matrix_bd)\r\n\r\n scores = (matrix_ac + matrix_bd) / math.sqrt(\r\n self.d_k\r\n ) # (batch, head, time1, time2)\r\n\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "MultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class MultiHeadedAttention(nn.Module):\r\n \"\"\"Multi-Head Attention layer.\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate):\r\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\r\n super(MultiHeadedAttention, self).__init__()\r\n assert n_feat % n_head == 0\r\n # We assume d_v always equals d_k\r\n self.d_k = n_feat // n_head\r\n self.h = n_head\r\n self.linear_q = nn.Linear(n_feat, n_feat)\r\n self.linear_k = nn.Linear(n_feat, n_feat)\r\n self.linear_v = nn.Linear(n_feat, n_feat)\r\n self.linear_out = nn.Linear(n_feat, n_feat)\r\n self.attn = None\r\n self.dropout = nn.Dropout(p=dropout_rate)\r\n\r\n def forward_qkv(self, query, key, value):\r\n \"\"\"Transform query, key and value.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n\r\n Returns:\r\n torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).\r\n torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).\r\n torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).\r\n\r\n \"\"\"\r\n n_batch = query.size(0)\r\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\r\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\r\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\r\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\r\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\r\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\r\n\r\n return q, k, v\r\n\r\n def forward_attention(self, value, scores, mask):\r\n \"\"\"Compute attention context vector.\r\n\r\n Args:\r\n value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).\r\n scores (torch.Tensor): Attention 
score (#batch, n_head, time1, time2).\r\n mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Transformed value (#batch, time1, d_model)\r\n weighted by the attention score (#batch, time1, time2).\r\n\r\n \"\"\"\r\n n_batch = value.size(0)\r\n if mask is not None:\r\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\r\n min_value = torch.finfo(scores.dtype).min\r\n scores = scores.masked_fill(mask, min_value)\r\n self.attn = torch.softmax(scores, dim=-1).masked_fill(\r\n mask, 0.0\r\n ) # (batch, head, time1, time2)\r\n else:\r\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\r\n\r\n p_attn = self.dropout(self.attn)\r\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\r\n x = (\r\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\r\n ) # (batch, time1, d_model)\r\n\r\n return self.linear_out(x) # (batch, time1, d_model)\r\n\r\n def forward(self, query, key, value, mask):\r\n \"\"\"Compute scaled dot product attention.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "RelPositionMultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class RelPositionMultiHeadedAttention(MultiHeadedAttention):\r\n \"\"\"Multi-Head Attention layer with relative position encoding (new implementation).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n Paper: https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\r\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\r\n super().__init__(n_head, n_feat, dropout_rate)\r\n self.zero_triu = zero_triu\r\n # linear transformation for positional encoding\r\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\r\n # these two learnable bias are used in matrix c and matrix d\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\r\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\r\n\r\n def rel_shift(self, x):\r\n \"\"\"Compute relative positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).\r\n time1 means the length of query vector.\r\n\r\n Returns:\r\n torch.Tensor: Output tensor.\r\n\r\n \"\"\"\r\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\r\n x_padded = torch.cat([zero_pad, x], dim=-1)\r\n\r\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\r\n x = x_padded[:, :, 1:].view_as(x)[\r\n :, :, :, : x.size(-1) // 2 + 1\r\n ] # only keep the positions from 0 to time2\r\n\r\n if self.zero_triu:\r\n 
ones = torch.ones((x.size(2), x.size(3)), device=x.device)\r\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\r\n\r\n return x\r\n\r\n def forward(self, query, key, value, pos_emb, mask):\r\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. positional encoding.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n pos_emb (torch.Tensor): Positional embedding tensor\r\n (#batch, 2*time1-1, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\r\n\r\n n_batch_pos = pos_emb.size(0)\r\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\r\n p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)\r\n\r\n # (batch, head, time1, d_k)\r\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\r\n # (batch, head, time1, d_k)\r\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\r\n\r\n # compute attention score\r\n # first compute matrix a and matrix c\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n # (batch, head, time1, time2)\r\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\r\n\r\n # compute matrix b and matrix d\r\n # (batch, head, time1, 2*time1-1)\r\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\r\n matrix_bd = self.rel_shift(matrix_bd)\r\n\r\n scores = (matrix_ac + matrix_bd) / math.sqrt(\r\n self.d_k\r\n ) # (batch, head, time1, time2)\r\n\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "LegacyRelPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class LegacyRelPositionalEncoding(PositionalEncoding):\r\n \"\"\"Relative positional encoding module (old version).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n See : Appendix B in https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Initialize class.\"\"\"\r\n super().__init__(\r\n d_model=d_model,\r\n dropout_rate=dropout_rate,\r\n max_len=max_len,\r\n reverse=True,\r\n )\r\n\r\n def forward(self, x):\r\n \"\"\"Compute positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n torch.Tensor: Positional embedding tensor (1, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale\r\n pos_emb = self.pe[:, : x.size(1)]\r\n return self.dropout(x), self.dropout(pos_emb)\r" }, { "identifier": "PositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class PositionalEncoding(torch.nn.Module):\r\n \"\"\"Positional encoding.\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n reverse (bool): Whether to reverse the input position. Only for\r\n the class LegacyRelPositionalEncoding. 
We remove it in the current\r\n class RelPositionalEncoding.\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):\r\n \"\"\"Construct an PositionalEncoding object.\r\n \"\"\"\r\n super(PositionalEncoding, self).__init__()\r\n self.d_model = d_model\r\n self.reverse = reverse\r\n self.xscale = math.sqrt(self.d_model)\r\n self.dropout = torch.nn.Dropout(p=dropout_rate)\r\n self.pe = None\r\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\r\n self._register_load_state_dict_pre_hook(_pre_hook)\r\n\r\n def extend_pe(self, x):\r\n \"\"\"Reset the positional encodings.\r\n \"\"\"\r\n if self.pe is not None:\r\n if self.pe.size(1) >= x.size(1):\r\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\r\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\r\n return\r\n pe = torch.zeros(x.size(1), self.d_model)\r\n if self.reverse:\r\n position = torch.arange(\r\n x.size(1) - 1, -1, -1.0, dtype=torch.float32\r\n ).unsqueeze(1)\r\n else:\r\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\r\n div_term = torch.exp(\r\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\r\n * -(math.log(10000.0) / self.d_model)\r\n )\r\n pe[:, 0::2] = torch.sin(position * div_term)\r\n pe[:, 1::2] = torch.cos(position * div_term)\r\n pe = pe.unsqueeze(0)\r\n self.pe = pe.to(device=x.device, dtype=x.dtype)\r\n\r\n def forward(self, x: torch.Tensor):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale + self.pe[:, : x.size(1)]\r\n return self.dropout(x)\r" }, { "identifier": "RelPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class RelPositionalEncoding(torch.nn.Module):\r\n \"\"\"Relative positional encoding module (new implementation).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n See : Appendix B in https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Construct an PositionalEncoding object.\r\n \"\"\"\r\n super(RelPositionalEncoding, self).__init__()\r\n self.d_model = d_model\r\n self.xscale = math.sqrt(self.d_model)\r\n self.dropout = torch.nn.Dropout(p=dropout_rate)\r\n self.pe = None\r\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\r\n\r\n def extend_pe(self, x):\r\n \"\"\"Reset the positional encodings.\r\n \"\"\"\r\n if self.pe is not None:\r\n # self.pe contains both positive and negative parts\r\n # the length of self.pe is 2 * input_len - 1\r\n if self.pe.size(1) >= x.size(1) * 2 - 1:\r\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\r\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\r\n return\r\n # Suppose `i` means to the position of query vecotr and `j` means the\r\n # position of key vector. 
We use position relative positions when keys\r\n # are to the left (i>j) and negative relative positions otherwise (i<j).\r\n pe_positive = torch.zeros(x.size(1), self.d_model)\r\n pe_negative = torch.zeros(x.size(1), self.d_model)\r\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\r\n div_term = torch.exp(\r\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\r\n * -(math.log(10000.0) / self.d_model)\r\n )\r\n pe_positive[:, 0::2] = torch.sin(position * div_term)\r\n pe_positive[:, 1::2] = torch.cos(position * div_term)\r\n pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)\r\n pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)\r\n\r\n # Reserve the order of positive indices and concat both positive and\r\n # negative indices. This is used to support the shifting trick\r\n # as in https://arxiv.org/abs/1901.02860\r\n pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)\r\n pe_negative = pe_negative[1:].unsqueeze(0)\r\n pe = torch.cat([pe_positive, pe_negative], dim=1)\r\n self.pe = pe.to(device=x.device, dtype=x.dtype)\r\n\r\n def forward(self, x: torch.Tensor):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale\r\n pos_emb = self.pe[\r\n :,\r\n self.pe.size(1) // 2 - x.size(1) + 1 : self.pe.size(1) // 2 + x.size(1),\r\n ]\r\n return self.dropout(x), self.dropout(pos_emb)\r" }, { "identifier": "ScaledPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class ScaledPositionalEncoding(PositionalEncoding):\r\n \"\"\"Scaled positional encoding module.\r\n\r\n See Sec. 3.2 https://arxiv.org/abs/1809.08895\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Initialize class.\"\"\"\r\n super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)\r\n self.alpha = torch.nn.Parameter(torch.tensor(1.0))\r\n\r\n def reset_parameters(self):\r\n \"\"\"Reset parameters.\"\"\"\r\n self.alpha.data = torch.tensor(1.0)\r\n\r\n def forward(self, x):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x + self.alpha * self.pe[:, : x.size(1)]\r\n return self.dropout(x)\r" }, { "identifier": "LayerNorm", "path": "src/clap_module/conformer/modules.py", "snippet": "class LayerNorm(torch.nn.LayerNorm):\r\n \"\"\"Layer normalization module.\r\n\r\n Args:\r\n nout (int): Output dim size.\r\n dim (int): Dimension to be normalized.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, nout, dim=-1):\r\n \"\"\"Construct an LayerNorm object.\"\"\"\r\n super(LayerNorm, self).__init__(nout, eps=1e-12)\r\n self.dim = dim\r\n\r\n def forward(self, x):\r\n \"\"\"Apply layer normalization.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor.\r\n\r\n Returns:\r\n torch.Tensor: Normalized tensor.\r\n\r\n \"\"\"\r\n if self.dim == -1:\r\n return super(LayerNorm, self).forward(x)\r\n return (\r\n super(LayerNorm, self)\r\n .forward(x.transpose(self.dim, -1))\r\n .transpose(self.dim, -1)\r\n )\r" }, { "identifier": "Conv1dLinear", "path": "src/clap_module/conformer/multi_layer_conv.py", "snippet": "class Conv1dLinear(torch.nn.Module):\r\n 
\"\"\"Conv1D + Linear for Transformer block.\r\n\r\n A variant of MultiLayeredConv1d, which replaces second conv-layer to linear.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\r\n \"\"\"Initialize Conv1dLinear module.\r\n\r\n Args:\r\n in_chans (int): Number of input channels.\r\n hidden_chans (int): Number of hidden channels.\r\n kernel_size (int): Kernel size of conv1d.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n super(Conv1dLinear, self).__init__()\r\n self.w_1 = torch.nn.Conv1d(\r\n in_chans,\r\n hidden_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.w_2 = torch.nn.Linear(hidden_chans, in_chans)\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n\r\n def forward(self, x):\r\n \"\"\"Calculate forward propagation.\r\n\r\n Args:\r\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\r\n\r\n Returns:\r\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\r\n\r\n \"\"\"\r\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\r\n return self.w_2(self.dropout(x))\r" }, { "identifier": "MultiLayeredConv1d", "path": "src/clap_module/conformer/multi_layer_conv.py", "snippet": "class MultiLayeredConv1d(torch.nn.Module):\r\n \"\"\"Multi-layered conv1d for Transformer block.\r\n\r\n This is a module of multi-leyered conv1d designed\r\n to replace positionwise feed-forward network\r\n in Transforner block, which is introduced in\r\n `FastSpeech: Fast, Robust and Controllable Text to Speech`_.\r\n\r\n .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:\r\n https://arxiv.org/pdf/1905.09263.pdf\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\r\n \"\"\"Initialize MultiLayeredConv1d module.\r\n\r\n Args:\r\n in_chans (int): Number of input channels.\r\n hidden_chans (int): Number of hidden channels.\r\n kernel_size (int): Kernel size of conv1d.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n super(MultiLayeredConv1d, self).__init__()\r\n self.w_1 = torch.nn.Conv1d(\r\n in_chans,\r\n hidden_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.w_2 = torch.nn.Conv1d(\r\n hidden_chans,\r\n in_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n\r\n def forward(self, x):\r\n \"\"\"Calculate forward propagation.\r\n\r\n Args:\r\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\r\n\r\n Returns:\r\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\r\n\r\n \"\"\"\r\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\r\n return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)\r" }, { "identifier": "PositionwiseFeedForward", "path": "src/clap_module/conformer/modules.py", "snippet": "class PositionwiseFeedForward(torch.nn.Module):\r\n \"\"\"Positionwise feed forward layer.\r\n\r\n Args:\r\n idim (int): Input dimenstion.\r\n hidden_units (int): The number of hidden units.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()):\r\n \"\"\"Construct an PositionwiseFeedForward object.\"\"\"\r\n super(PositionwiseFeedForward, self).__init__()\r\n self.w_1 = torch.nn.Linear(idim, hidden_units)\r\n self.w_2 = torch.nn.Linear(hidden_units, idim)\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n self.activation = activation\r\n\r\n def forward(self, x):\r\n \"\"\"Forward 
function.\"\"\"\r\n return self.w_2(self.dropout(self.activation(self.w_1(x))))\r" }, { "identifier": "repeat", "path": "src/clap_module/conformer/modules.py", "snippet": "def repeat(N, fn, layer_drop_rate=0.0):\r\n \"\"\"Repeat module N times.\r\n\r\n Args:\r\n N (int): Number of repeat time.\r\n fn (Callable): Function to generate module.\r\n layer_drop_rate (float): Probability of dropping out each fn (layer).\r\n\r\n Returns:\r\n MultiSequential: Repeated model instance.\r\n\r\n \"\"\"\r\n return MultiSequential(*[fn(n) for n in range(N)], layer_drop_rate=layer_drop_rate)\r" }, { "identifier": "Conv2dSubsampling", "path": "src/clap_module/conformer/sub_sampling.py", "snippet": "class Conv2dSubsampling(torch.nn.Module):\r\n \"\"\"Convolutional 2D subsampling (to 1/4 length).\r\n\r\n Args:\r\n idim (int): Input dimension.\r\n odim (int): Output dimension.\r\n dropout_rate (float): Dropout rate.\r\n pos_enc (torch.nn.Module): Custom position encoding layer.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\r\n \"\"\"Construct an Conv2dSubsampling object.\"\"\"\r\n super(Conv2dSubsampling, self).__init__()\r\n self.conv = torch.nn.Sequential(\r\n torch.nn.Conv2d(1, odim, 3, 2),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(odim, odim, 3, 2),\r\n torch.nn.ReLU(),\r\n )\r\n self.out = torch.nn.Sequential(\r\n torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim),\r\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\r\n )\r\n\r\n def forward(self, x, x_mask):\r\n \"\"\"Subsample x.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (#batch, time, idim).\r\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\r\n\r\n Returns:\r\n torch.Tensor: Subsampled tensor (#batch, time', odim),\r\n where time' = time // 4.\r\n torch.Tensor: Subsampled mask (#batch, 1, time'),\r\n where time' = time // 4.\r\n\r\n \"\"\"\r\n x = x.unsqueeze(1) # (b, c, t, f)\r\n x = self.conv(x)\r\n b, c, t, f = x.size()\r\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\r\n if x_mask is None:\r\n return x, None\r\n return x, x_mask[:, :, :-2:2][:, :, :-2:2]\r\n\r\n def __getitem__(self, key):\r\n \"\"\"Get item.\r\n\r\n When reset_parameters() is called, if use_scaled_pos_enc is used,\r\n return the positioning encoding.\r\n\r\n \"\"\"\r\n if key != -1:\r\n raise NotImplementedError(\"Support only `-1` (for `reset_parameters`).\")\r\n return self.out[key]\r" }, { "identifier": "AttentionPool1d", "path": "src/clap_module/feature_fusion.py", "snippet": "class AttentionPool1d(nn.Module):\r\n def __init__(\r\n self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None\r\n ):\r\n super().__init__()\r\n self.positional_embedding = nn.Parameter(\r\n torch.randn(spacial_dim + 1, embed_dim) / embed_dim\r\n # torch.randn(spacial_dim, embed_dim) / embed_dim\r\n )\r\n self.k_proj = nn.Linear(embed_dim, embed_dim)\r\n self.q_proj = nn.Linear(embed_dim, embed_dim)\r\n self.v_proj = nn.Linear(embed_dim, embed_dim)\r\n self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)\r\n self.num_heads = num_heads\r\n\r\n def forward(self, x):\r\n # import pdb; pdb.set_trace()\r\n x = x.permute(1, 0, 2) # B*L*D -> L*B*D; NCHW -> (HW)NC\r\n x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC\r\n x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC\r\n x, _ = F.multi_head_attention_forward(\r\n query=x,\r\n key=x,\r\n value=x,\r\n embed_dim_to_check=x.shape[-1],\r\n num_heads=self.num_heads,\r\n 
q_proj_weight=self.q_proj.weight,\r\n k_proj_weight=self.k_proj.weight,\r\n v_proj_weight=self.v_proj.weight,\r\n in_proj_weight=None,\r\n in_proj_bias=torch.cat(\r\n [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]\r\n ),\r\n bias_k=None,\r\n bias_v=None,\r\n add_zero_attn=False,\r\n dropout_p=0,\r\n out_proj_weight=self.c_proj.weight,\r\n out_proj_bias=self.c_proj.bias,\r\n use_separate_proj_weight=True,\r\n training=self.training,\r\n need_weights=False,\r\n )\r\n\r\n return x[0] # B*D\r" }, { "identifier": "DAF", "path": "src/clap_module/feature_fusion.py", "snippet": "class DAF(nn.Module):\r\n \"\"\"直接相加 DirectAddFuse\r\n \"\"\"\r\n\r\n def __init__(self):\r\n super(DAF, self).__init__()\r\n\r\n def forward(self, x, residual):\r\n return x + residual\r" }, { "identifier": "AFF", "path": "src/clap_module/feature_fusion.py", "snippet": "class AFF(nn.Module):\r\n \"\"\"多特征融合 AFF\r\n \"\"\"\r\n\r\n def __init__(self, channels=64, r=4, type='2D'):\r\n super(AFF, self).__init__()\r\n inter_channels = int(channels // r)\r\n\r\n if type == '1D':\r\n self.local_att = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n elif type == '2D':\r\n self.local_att = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n else:\r\n raise f'the type is not supported.'\r\n\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x, residual):\r\n flag = False\r\n xa = x + residual\r\n if xa.size(0) == 1:\r\n xa = torch.cat([xa, xa], dim=0)\r\n flag = True\r\n xl = self.local_att(xa)\r\n xg = self.global_att(xa)\r\n xlg = xl + xg\r\n wei = self.sigmoid(xlg)\r\n xo = 2 * x * wei + 2 * residual * (1 - wei)\r\n if flag:\r\n xo = xo[0].unsqueeze(0)\r\n return xo\r" }, { "identifier": "iAFF", "path": "src/clap_module/feature_fusion.py", "snippet": "class iAFF(nn.Module):\r\n \"\"\"多特征融合 iAFF\r\n \"\"\"\r\n\r\n def __init__(self, channels=64, r=4, type='2D'):\r\n super(iAFF, self).__init__()\r\n inter_channels = int(channels // r)\r\n\r\n if type == '1D':\r\n # 本地注意力\r\n self.local_att = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n\r\n # 全局注意力\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n 
nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n\r\n # 第二次本地注意力\r\n self.local_att2 = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n # 第二次全局注意力\r\n self.global_att2 = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n elif type == '2D':\r\n # 本地注意力\r\n self.local_att = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n\r\n # 全局注意力\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n\r\n # 第二次本地注意力\r\n self.local_att2 = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n # 第二次全局注意力\r\n self.global_att2 = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n else:\r\n raise f'the type is not supported'\r\n\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x, residual):\r\n flag = False\r\n xa = x + residual\r\n if xa.size(0) == 1:\r\n xa = torch.cat([xa, xa], dim=0)\r\n flag = True\r\n xl = self.local_att(xa)\r\n xg = self.global_att(xa)\r\n xlg = xl + xg\r\n wei = self.sigmoid(xlg)\r\n xi = x * wei + residual * (1 - wei)\r\n\r\n xl2 = self.local_att2(xi)\r\n xg2 = self.global_att(xi)\r\n xlg2 = xl2 + xg2\r\n wei2 = self.sigmoid(xlg2)\r\n xo = x * wei2 + residual * (1 - wei2)\r\n if flag:\r\n xo = xo[0].unsqueeze(0)\r\n return xo\r" } ]
import_statement:
import logging
import torch
import math

from .convolution import ConvolutionModule
from .encoder_layer import EncoderLayer
from .modules import get_activation
from .modules import VGG2L
from .modules import (
    LegacyRelPositionMultiHeadedAttention,
    MultiHeadedAttention,
    RelPositionMultiHeadedAttention,
)
from .embedding import (
    LegacyRelPositionalEncoding,
    PositionalEncoding,
    RelPositionalEncoding,
    ScaledPositionalEncoding,
)
from .modules import LayerNorm
from .multi_layer_conv import (
    Conv1dLinear,
    MultiLayeredConv1d,
)
from .modules import (
    PositionwiseFeedForward,
)
from .modules import repeat
from .sub_sampling import Conv2dSubsampling
from ..feature_fusion import AttentionPool1d, DAF, AFF, iAFF

token_num: 14,705

cropped_code:
selfattention_layer_type (str): Encoder attention layer type. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. zero_triu (bool): Whether to zero the upper triangular part of attention matrix. cnn_module_kernel (int): Kernerl size of convolution module. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes return type signature.) """ def __init__( self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, ffn_layer_type="linear", ffn_conv_kernel_size=1, macaron_style=False, pos_enc_layer_type="abs_pos", selfattention_layer_type="selfattn", activation_type="relu", use_cnn_module=True, zero_triu=False, cnn_module_kernel=31, padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling( idim, attention_dim, dropout_rate, pos_enc_class(attention_dim, positional_dropout_rate), ) self.conv_subsampling_factor = 4 elif input_layer == "vgg2l": self.embed = VGG2L(idim, attention_dim) self.conv_subsampling_factor = 4 elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx), pos_enc_class(attention_dim, positional_dropout_rate), ) elif isinstance(input_layer, torch.nn.Module): self.embed = torch.nn.Sequential( input_layer, pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer is None: self.embed = torch.nn.Sequential( pos_enc_class(attention_dim, positional_dropout_rate) ) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before # self-attention module definition if selfattention_layer_type == "selfattn": logging.info("encoder self-attention layer type = self-attention") encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "legacy_rel_selfattn": assert pos_enc_layer_type == "legacy_rel_pos" encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, 
) elif selfattention_layer_type == "rel_selfattn": logging.info("encoder self-attention layer type = relative self-attention") assert pos_enc_layer_type == "rel_pos"
all_code:
# Copyright 2020 Johns Hopkins University (Shinji Watanabe) # Northwestern Polytechnical University (Pengcheng Guo) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Encoder definition.""" class Encoder(torch.nn.Module): """Conformer encoder module. Args: idim (int): Input dimension. attention_dim (int): Dimension of attention. attention_heads (int): The number of heads of multi head attention. linear_units (int): The number of units of position-wise feed forward. num_blocks (int): The number of decoder blocks. dropout_rate (float): Dropout rate. positional_dropout_rate (float): Dropout rate after adding positional encoding. attention_dropout_rate (float): Dropout rate in attention. input_layer (Union[str, torch.nn.Module]): Input layer type. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. macaron_style (bool): Whether to use macaron style for positionwise layer. pos_enc_layer_type (str): Encoder positional encoding layer type. selfattention_layer_type (str): Encoder attention layer type. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. zero_triu (bool): Whether to zero the upper triangular part of attention matrix. cnn_module_kernel (int): Kernerl size of convolution module. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes return type signature.) 
""" def __init__( self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, ffn_layer_type="linear", ffn_conv_kernel_size=1, macaron_style=False, pos_enc_layer_type="abs_pos", selfattention_layer_type="selfattn", activation_type="relu", use_cnn_module=True, zero_triu=False, cnn_module_kernel=31, padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling( idim, attention_dim, dropout_rate, pos_enc_class(attention_dim, positional_dropout_rate), ) self.conv_subsampling_factor = 4 elif input_layer == "vgg2l": self.embed = VGG2L(idim, attention_dim) self.conv_subsampling_factor = 4 elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx), pos_enc_class(attention_dim, positional_dropout_rate), ) elif isinstance(input_layer, torch.nn.Module): self.embed = torch.nn.Sequential( input_layer, pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer is None: self.embed = torch.nn.Sequential( pos_enc_class(attention_dim, positional_dropout_rate) ) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before # self-attention module definition if selfattention_layer_type == "selfattn": logging.info("encoder self-attention layer type = self-attention") encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "legacy_rel_selfattn": assert pos_enc_layer_type == "legacy_rel_pos" encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "rel_selfattn": logging.info("encoder self-attention layer type = relative self-attention") assert pos_enc_layer_type == "rel_pos"
next_line: encoder_selfattn_layer = RelPositionMultiHeadedAttention
gold_snippet_index: 6
created_at: 2023-11-25 02:38:32+00:00
level: 24k
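The record above is complete at this point; for this row, context entry 6 is the RelPositionMultiHeadedAttention snippet, matching the target line. Before the next row starts, here is a hedged sketch of how one such row could be turned into a completion prompt and scored. The helpers build_prompt and exact_match, the choice to prepend the gold snippet as a comment block, and generate_next_line (a stand-in for whatever model is evaluated) are all illustrative assumptions, not a protocol prescribed by the dataset:

# Sketch only: assemble a prompt from one row and score a prediction by exact match.
# "row" is a dict with the fields labelled above; generate_next_line is a placeholder.
def build_prompt(row, use_gold_context=True):
    parts = []
    if use_gold_context:
        gold = row["context"][row["gold_snippet_index"]]
        # Surface the cross-file definition ahead of the in-file content.
        parts.append("# Retrieved from " + gold["path"] + "\n" + gold["snippet"])
    parts.append(row["import_statement"])
    parts.append(row["cropped_code"])
    return "\n\n".join(parts)

def exact_match(prediction, row):
    # Compare the first generated line against next_line, ignoring surrounding whitespace.
    first = prediction.splitlines()[0].strip() if prediction else ""
    return first == row["next_line"].strip()

# Usage (placeholder model call):
# prompt = build_prompt(row)
# print(exact_match(generate_next_line(prompt), row))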
repo_name: Luo-Z13/pointobb
file_path: PointOBB/mmdet/models/roi_heads/PointOBB_head.py
context:
[ { "identifier": "HEADS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "HEADS = MODELS" }, { "identifier": "MODELS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "MODELS = Registry('models', parent=MMCV_MODELS)" }, { "identifier": "build_head", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_head(cfg):\n \"\"\"Build head.\"\"\"\n return HEADS.build(cfg)" }, { "identifier": "build_roi_extractor", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_roi_extractor(cfg):\n \"\"\"Build roi extractor.\"\"\"\n return ROI_EXTRACTORS.build(cfg)" }, { "identifier": "build_loss", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_loss(cfg):\n \"\"\"Build loss.\"\"\"\n return LOSSES.build(cfg)" }, { "identifier": "StandardRoIHead", "path": "PointOBB/mmdet/models/roi_heads/standard_roi_head.py", "snippet": "class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):\n \"\"\"Simplest base roi head including one bbox head and one mask head.\"\"\"\n\n def init_assigner_sampler(self):\n \"\"\"Initialize assigner and sampler.\"\"\"\n self.bbox_assigner = None\n self.bbox_sampler = None\n if self.train_cfg:\n self.bbox_assigner = build_assigner(self.train_cfg.assigner)\n self.bbox_sampler = build_sampler(\n self.train_cfg.sampler, context=self)\n\n def init_bbox_head(self, bbox_roi_extractor, bbox_head):\n \"\"\"Initialize ``bbox_head``\"\"\"\n self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor)\n self.bbox_head = build_head(bbox_head)\n\n def init_mask_head(self, mask_roi_extractor, mask_head):\n \"\"\"Initialize ``mask_head``\"\"\"\n if mask_roi_extractor is not None:\n self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)\n self.share_roi_extractor = False\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n self.mask_head = build_head(mask_head)\n\n def forward_dummy(self, x, proposals):\n \"\"\"Dummy forward function.\"\"\"\n # bbox head\n outs = ()\n rois = bbox2roi([proposals])\n if self.with_bbox:\n bbox_results = self._bbox_forward(x, rois)\n outs = outs + (bbox_results['cls_score'],\n bbox_results['bbox_pred'])\n # mask head\n if self.with_mask:\n mask_rois = rois[:100]\n mask_results = self._mask_forward(x, mask_rois)\n outs = outs + (mask_results['mask_pred'], )\n return outs\n\n def forward_train(self,\n x,\n img_metas,\n proposal_list,\n gt_bboxes,\n gt_labels,\n ann_weight,\n gt_bboxes_ignore=None,\n gt_masks=None):\n \"\"\"\n Args:\n x (list[Tensor]): list of multi-level img features.\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n proposals (list[Tensors]): list of region proposals.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n # assign gts and sample proposals\n if self.with_bbox or self.with_mask:\n num_imgs = len(img_metas)\n if gt_bboxes_ignore is None:\n 
gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n for i in range(num_imgs):\n assign_result = self.bbox_assigner.assign(\n proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],\n gt_labels[i])\n sampling_result = self.bbox_sampler.sample(\n assign_result,\n proposal_list[i],\n gt_bboxes[i],\n gt_labels[i],\n feats=[lvl_feat[i][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n losses = dict()\n # bbox head forward and loss\n if self.with_bbox:\n bbox_results = self._bbox_forward_train(x, sampling_results,\n gt_bboxes, gt_labels,ann_weight, #add by fei\n img_metas)\n losses.update(bbox_results['loss_bbox'])\n\n # mask head forward and loss\n if self.with_mask:\n mask_results = self._mask_forward_train(x, sampling_results,\n bbox_results['bbox_feats'],\n gt_masks, img_metas)\n losses.update(mask_results['loss_mask'])\n\n return losses\n\n def _bbox_forward(self, x, rois):\n \"\"\"Box head forward function used in both training and testing.\"\"\"\n # TODO: a more flexible way to decide which feature maps to use\n bbox_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs], rois)\n if self.with_shared_head:\n bbox_feats = self.shared_head(bbox_feats)\n cls_score, bbox_pred = self.bbox_head(bbox_feats)\n\n bbox_results = dict(\n cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n return bbox_results\n\n def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, ann_weight,\n img_metas):\n \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n rois = bbox2roi([res.bboxes for res in sampling_results])\n bbox_results = self._bbox_forward(x, rois)\n\n bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,\n gt_labels,ann_weight, self.train_cfg) ## add by fei\n loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],\n bbox_results['bbox_pred'], rois,\n *bbox_targets)\n\n bbox_results.update(loss_bbox=loss_bbox)\n return bbox_results\n\n def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,\n img_metas):\n \"\"\"Run forward function and calculate loss for mask head in\n training.\"\"\"\n if not self.share_roi_extractor:\n pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n mask_results = self._mask_forward(x, pos_rois)\n else:\n pos_inds = []\n device = bbox_feats.device\n for res in sampling_results:\n pos_inds.append(\n torch.ones(\n res.pos_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds.append(\n torch.zeros(\n res.neg_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds = torch.cat(pos_inds)\n\n mask_results = self._mask_forward(\n x, pos_inds=pos_inds, bbox_feats=bbox_feats)\n\n mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,\n self.train_cfg)\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n loss_mask = self.mask_head.loss(mask_results['mask_pred'],\n mask_targets, pos_labels)\n\n mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets)\n return mask_results\n\n def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):\n \"\"\"Mask head forward function used in both training and testing.\"\"\"\n assert ((rois is not None) ^\n (pos_inds is not None and bbox_feats is not None))\n if rois is not None:\n mask_feats = self.mask_roi_extractor(\n x[:self.mask_roi_extractor.num_inputs], rois)\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n else:\n assert bbox_feats is not None\n mask_feats = 
bbox_feats[pos_inds]\n\n mask_pred = self.mask_head(mask_feats)\n mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats)\n return mask_results\n\n async def async_simple_test(self,\n x,\n proposal_list,\n img_metas,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n\n det_bboxes, det_labels = await self.async_test_bboxes(\n x, img_metas, proposal_list, self.test_cfg, rescale=rescale)\n bbox_results = bbox2result(det_bboxes, det_labels,\n self.bbox_head.num_classes)\n if not self.with_mask:\n return bbox_results\n else:\n segm_results = await self.async_test_mask(\n x,\n img_metas,\n det_bboxes,\n det_labels,\n rescale=rescale,\n mask_test_cfg=self.test_cfg.get('mask'))\n return bbox_results, segm_results\n\n def simple_test(self,\n x,\n proposal_list,\n img_metas,\n proposals=None,\n rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n\n det_bboxes, det_labels = self.simple_test_bboxes(\n x, img_metas, proposal_list, self.test_cfg, rescale=rescale)\n\n bbox_results = [\n bbox2result(det_bboxes[i], det_labels[i],\n self.bbox_head.num_classes)\n for i in range(len(det_bboxes))\n ]\n\n if not self.with_mask:\n return bbox_results\n else:\n segm_results = self.simple_test_mask(\n x, img_metas, det_bboxes, det_labels, rescale=rescale)\n return list(zip(bbox_results, segm_results))\n\n def aug_test(self, x, proposal_list, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas,\n proposal_list,\n self.test_cfg)\n if rescale:\n _det_bboxes = det_bboxes\n else:\n _det_bboxes = det_bboxes.clone()\n _det_bboxes[:, :4] *= det_bboxes.new_tensor(\n img_metas[0][0]['scale_factor'])\n bbox_results = bbox2result(_det_bboxes, det_labels,\n self.bbox_head.num_classes)\n\n # det_bboxes always keep the original scale\n if self.with_mask:\n segm_results = self.aug_test_mask(x, img_metas, det_bboxes,\n det_labels)\n return [(bbox_results, segm_results)]\n else:\n return [bbox_results]\n\n def onnx_export(self, x, proposals, img_metas, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n det_bboxes, det_labels = self.bbox_onnx_export(\n x, img_metas, proposals, self.test_cfg, rescale=rescale)\n\n if not self.with_mask:\n return det_bboxes, det_labels\n else:\n segm_results = self.mask_onnx_export(\n x, img_metas, det_bboxes, det_labels, rescale=rescale)\n return det_bboxes, det_labels, segm_results\n\n def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs):\n \"\"\"Export mask branch to onnx which supports batch inference.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n img_metas (list[dict]): Image meta info.\n det_bboxes (Tensor): Bboxes and corresponding scores.\n has shape [N, num_bboxes, 5].\n det_labels (Tensor): class labels of\n shape [N, num_bboxes].\n\n Returns:\n tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5]\n and class labels of shape [N, num_bboxes].\n \"\"\"\n # image shapes of images in the batch\n\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n raise RuntimeError('[ONNX Error] Can not record MaskHead '\n 'as it has not been executed this time')\n batch_size = det_bboxes.size(0)\n # if det_bboxes is rescaled to the original image size, we need to\n 
# rescale it back to the testing scale to obtain RoIs.\n det_bboxes = det_bboxes[..., :4]\n batch_index = torch.arange(\n det_bboxes.size(0), device=det_bboxes.device).float().view(\n -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)\n mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)\n mask_rois = mask_rois.view(-1, 5)\n mask_results = self._mask_forward(x, mask_rois)\n mask_pred = mask_results['mask_pred']\n max_shape = img_metas[0]['img_shape_for_onnx']\n num_det = det_bboxes.shape[1]\n det_bboxes = det_bboxes.reshape(-1, 4)\n det_labels = det_labels.reshape(-1)\n segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes,\n det_labels, self.test_cfg,\n max_shape)\n segm_results = segm_results.reshape(batch_size, num_det, max_shape[0],\n max_shape[1])\n return segm_results\n\n def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg,\n **kwargs):\n \"\"\"Export bbox branch to onnx which supports batch inference.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n img_metas (list[dict]): Image meta info.\n proposals (Tensor): Region proposals with\n batch dimension, has shape [N, num_bboxes, 5].\n rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n\n Returns:\n tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5]\n and class labels of shape [N, num_bboxes].\n \"\"\"\n # get origin input shape to support onnx dynamic input shape\n assert len(\n img_metas\n ) == 1, 'Only support one input image while in exporting to ONNX'\n img_shapes = img_metas[0]['img_shape_for_onnx']\n\n rois = proposals\n batch_index = torch.arange(\n rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(\n rois.size(0), rois.size(1), 1)\n rois = torch.cat([batch_index, rois[..., :4]], dim=-1)\n batch_size = rois.shape[0]\n num_proposals_per_img = rois.shape[1]\n\n # Eliminate the batch dimension\n rois = rois.view(-1, 5)\n bbox_results = self._bbox_forward(x, rois)\n cls_score = bbox_results['cls_score']\n bbox_pred = bbox_results['bbox_pred']\n\n # Recover the batch dimension\n rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))\n cls_score = cls_score.reshape(batch_size, num_proposals_per_img,\n cls_score.size(-1))\n\n bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img,\n bbox_pred.size(-1))\n det_bboxes, det_labels = self.bbox_head.onnx_export(\n rois, cls_score, bbox_pred, img_shapes, cfg=rcnn_test_cfg)\n\n return det_bboxes, det_labels" }, { "identifier": "CascadeRoIHead", "path": "PointOBB/mmdet/models/roi_heads/cascade_roi_head.py", "snippet": "class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):\n \"\"\"Cascade roi head including one bbox head and one mask head.\n\n https://arxiv.org/abs/1712.00726\n \"\"\"\n\n def __init__(self,\n num_stages,\n stage_loss_weights,\n bbox_roi_extractor=None,\n bbox_head=None,\n mask_roi_extractor=None,\n mask_head=None,\n shared_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n assert bbox_roi_extractor is not None\n assert bbox_head is not None\n assert shared_head is None, \\\n 'Shared head is not supported in Cascade RCNN anymore'\n\n self.num_stages = num_stages\n self.stage_loss_weights = stage_loss_weights\n super(CascadeRoIHead, self).__init__(\n bbox_roi_extractor=bbox_roi_extractor,\n bbox_head=bbox_head,\n mask_roi_extractor=mask_roi_extractor,\n mask_head=mask_head,\n shared_head=shared_head,\n train_cfg=train_cfg,\n test_cfg=test_cfg,\n pretrained=pretrained,\n init_cfg=init_cfg)\n\n def init_bbox_head(self, 
bbox_roi_extractor, bbox_head):\n \"\"\"Initialize box head and box roi extractor.\n\n Args:\n bbox_roi_extractor (dict): Config of box roi extractor.\n bbox_head (dict): Config of box in box head.\n \"\"\"\n self.bbox_roi_extractor = ModuleList()\n self.bbox_head = ModuleList()\n if not isinstance(bbox_roi_extractor, list):\n bbox_roi_extractor = [\n bbox_roi_extractor for _ in range(self.num_stages)\n ]\n if not isinstance(bbox_head, list):\n bbox_head = [bbox_head for _ in range(self.num_stages)]\n assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages\n for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):\n self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor))\n self.bbox_head.append(build_head(head))\n\n def init_mask_head(self, mask_roi_extractor, mask_head):\n \"\"\"Initialize mask head and mask roi extractor.\n\n Args:\n mask_roi_extractor (dict): Config of mask roi extractor.\n mask_head (dict): Config of mask in mask head.\n \"\"\"\n self.mask_head = nn.ModuleList()\n if not isinstance(mask_head, list):\n mask_head = [mask_head for _ in range(self.num_stages)]\n assert len(mask_head) == self.num_stages\n for head in mask_head:\n self.mask_head.append(build_head(head))\n if mask_roi_extractor is not None:\n self.share_roi_extractor = False\n self.mask_roi_extractor = ModuleList()\n if not isinstance(mask_roi_extractor, list):\n mask_roi_extractor = [\n mask_roi_extractor for _ in range(self.num_stages)\n ]\n assert len(mask_roi_extractor) == self.num_stages\n for roi_extractor in mask_roi_extractor:\n self.mask_roi_extractor.append(\n build_roi_extractor(roi_extractor))\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n\n def init_assigner_sampler(self):\n \"\"\"Initialize assigner and sampler for each stage.\"\"\"\n self.bbox_assigner = []\n self.bbox_sampler = []\n if self.train_cfg is not None:\n for idx, rcnn_train_cfg in enumerate(self.train_cfg):\n self.bbox_assigner.append(\n build_assigner(rcnn_train_cfg.assigner))\n self.current_stage = idx\n self.bbox_sampler.append(\n build_sampler(rcnn_train_cfg.sampler, context=self))\n\n def forward_dummy(self, x, proposals):\n \"\"\"Dummy forward function.\"\"\"\n # bbox head\n outs = ()\n rois = bbox2roi([proposals])\n if self.with_bbox:\n for i in range(self.num_stages):\n bbox_results = self._bbox_forward(i, x, rois)\n outs = outs + (bbox_results['cls_score'],\n bbox_results['bbox_pred'])\n # mask heads\n if self.with_mask:\n mask_rois = rois[:100]\n for i in range(self.num_stages):\n mask_results = self._mask_forward(i, x, mask_rois)\n outs = outs + (mask_results['mask_pred'], )\n return outs\n\n def _bbox_forward(self, stage, x, rois):\n \"\"\"Box head forward function used in both training and testing.\"\"\"\n bbox_roi_extractor = self.bbox_roi_extractor[stage]\n bbox_head = self.bbox_head[stage]\n bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n rois)\n # do not support caffe_c4 model anymore\n cls_score, bbox_pred = bbox_head(bbox_feats)\n\n bbox_results = dict(\n cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n return bbox_results\n\n def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,\n gt_labels, rcnn_train_cfg):\n \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n rois = bbox2roi([res.bboxes for res in sampling_results])\n bbox_results = self._bbox_forward(stage, x, rois)\n bbox_targets = self.bbox_head[stage].get_targets(\n sampling_results, gt_bboxes, 
gt_labels, rcnn_train_cfg)\n loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'],\n bbox_results['bbox_pred'], rois,\n *bbox_targets)\n\n bbox_results.update(\n loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)\n return bbox_results\n\n def _mask_forward(self, stage, x, rois):\n \"\"\"Mask head forward function used in both training and testing.\"\"\"\n mask_roi_extractor = self.mask_roi_extractor[stage]\n mask_head = self.mask_head[stage]\n mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],\n rois)\n # do not support caffe_c4 model anymore\n mask_pred = mask_head(mask_feats)\n\n mask_results = dict(mask_pred=mask_pred)\n return mask_results\n\n def _mask_forward_train(self,\n stage,\n x,\n sampling_results,\n gt_masks,\n rcnn_train_cfg,\n bbox_feats=None):\n \"\"\"Run forward function and calculate loss for mask head in\n training.\"\"\"\n pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n mask_results = self._mask_forward(stage, x, pos_rois)\n\n mask_targets = self.mask_head[stage].get_targets(\n sampling_results, gt_masks, rcnn_train_cfg)\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],\n mask_targets, pos_labels)\n\n mask_results.update(loss_mask=loss_mask)\n return mask_results\n\n def forward_train(self,\n x,\n img_metas,\n proposal_list,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None):\n \"\"\"\n Args:\n x (list[Tensor]): list of multi-level img features.\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n proposals (list[Tensors]): list of region proposals.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n losses = dict()\n for i in range(self.num_stages):\n self.current_stage = i\n rcnn_train_cfg = self.train_cfg[i]\n lw = self.stage_loss_weights[i]\n\n # assign gts and sample proposals\n sampling_results = []\n if self.with_bbox or self.with_mask:\n bbox_assigner = self.bbox_assigner[i]\n bbox_sampler = self.bbox_sampler[i]\n num_imgs = len(img_metas)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n\n for j in range(num_imgs):\n assign_result = bbox_assigner.assign(\n proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],\n gt_labels[j])\n sampling_result = bbox_sampler.sample(\n assign_result,\n proposal_list[j],\n gt_bboxes[j],\n gt_labels[j],\n feats=[lvl_feat[j][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n # bbox head forward and loss\n bbox_results = self._bbox_forward_train(i, x, sampling_results,\n gt_bboxes, gt_labels,\n rcnn_train_cfg)\n\n for name, value in bbox_results['loss_bbox'].items():\n losses[f's{i}.{name}'] = (\n value * lw if 'loss' in name else value)\n\n # mask head forward and loss\n if self.with_mask:\n mask_results = self._mask_forward_train(\n i, 
x, sampling_results, gt_masks, rcnn_train_cfg,\n bbox_results['bbox_feats'])\n for name, value in mask_results['loss_mask'].items():\n losses[f's{i}.{name}'] = (\n value * lw if 'loss' in name else value)\n\n # refine bboxes\n if i < self.num_stages - 1:\n pos_is_gts = [res.pos_is_gt for res in sampling_results]\n # bbox_targets is a tuple\n roi_labels = bbox_results['bbox_targets'][0]\n with torch.no_grad():\n roi_labels = torch.where(\n roi_labels == self.bbox_head[i].num_classes,\n bbox_results['cls_score'][:, :-1].argmax(1),\n roi_labels)\n proposal_list = self.bbox_head[i].refine_bboxes(\n bbox_results['rois'], roi_labels,\n bbox_results['bbox_pred'], pos_is_gts, img_metas)\n\n return losses\n\n def simple_test(self, x, proposal_list, img_metas, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n num_imgs = len(proposal_list)\n img_shapes = tuple(meta['img_shape'] for meta in img_metas)\n ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n # \"ms\" in variable names means multi-stage\n ms_bbox_result = {}\n ms_segm_result = {}\n ms_scores = []\n rcnn_test_cfg = self.test_cfg\n\n rois = bbox2roi(proposal_list)\n for i in range(self.num_stages):\n bbox_results = self._bbox_forward(i, x, rois)\n\n # split batch bbox prediction back to each image\n cls_score = bbox_results['cls_score']\n bbox_pred = bbox_results['bbox_pred']\n num_proposals_per_img = tuple(\n len(proposals) for proposals in proposal_list)\n rois = rois.split(num_proposals_per_img, 0)\n cls_score = cls_score.split(num_proposals_per_img, 0)\n if isinstance(bbox_pred, torch.Tensor):\n bbox_pred = bbox_pred.split(num_proposals_per_img, 0)\n else:\n bbox_pred = self.bbox_head[i].bbox_pred_split(\n bbox_pred, num_proposals_per_img)\n ms_scores.append(cls_score)\n\n if i < self.num_stages - 1:\n bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]\n rois = torch.cat([\n self.bbox_head[i].regress_by_class(rois[j], bbox_label[j],\n bbox_pred[j],\n img_metas[j])\n for j in range(num_imgs)\n ])\n\n # average scores of each image by stages\n cls_score = [\n sum([score[i] for score in ms_scores]) / float(len(ms_scores))\n for i in range(num_imgs)\n ]\n\n # apply bbox post-processing to each image individually\n det_bboxes = []\n det_labels = []\n for i in range(num_imgs):\n det_bbox, det_label = self.bbox_head[-1].get_bboxes(\n rois[i],\n cls_score[i],\n bbox_pred[i],\n img_shapes[i],\n scale_factors[i],\n rescale=rescale,\n cfg=rcnn_test_cfg)\n det_bboxes.append(det_bbox)\n det_labels.append(det_label)\n\n if torch.onnx.is_in_onnx_export():\n return det_bboxes, det_labels\n bbox_results = [\n bbox2result(det_bboxes[i], det_labels[i],\n self.bbox_head[-1].num_classes)\n for i in range(num_imgs)\n ]\n ms_bbox_result['ensemble'] = bbox_results\n\n if self.with_mask:\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n mask_classes = self.mask_head[-1].num_classes\n segm_results = [[[] for _ in range(mask_classes)]\n for _ in range(num_imgs)]\n else:\n if rescale and not isinstance(scale_factors[0], float):\n scale_factors = [\n torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n for scale_factor in scale_factors\n ]\n _bboxes = [\n det_bboxes[i][:, :4] *\n scale_factors[i] if rescale else det_bboxes[i][:, :4]\n for i in range(len(det_bboxes))\n ]\n mask_rois = bbox2roi(_bboxes)\n num_mask_rois_per_img = tuple(\n _bbox.size(0) for _bbox in _bboxes)\n aug_masks = []\n for i 
in range(self.num_stages):\n mask_results = self._mask_forward(i, x, mask_rois)\n mask_pred = mask_results['mask_pred']\n # split batch mask prediction back to each image\n mask_pred = mask_pred.split(num_mask_rois_per_img, 0)\n aug_masks.append(\n [m.sigmoid().cpu().numpy() for m in mask_pred])\n\n # apply mask post-processing to each image individually\n segm_results = []\n for i in range(num_imgs):\n if det_bboxes[i].shape[0] == 0:\n segm_results.append(\n [[]\n for _ in range(self.mask_head[-1].num_classes)])\n else:\n aug_mask = [mask[i] for mask in aug_masks]\n merged_masks = merge_aug_masks(\n aug_mask, [[img_metas[i]]] * self.num_stages,\n rcnn_test_cfg)\n segm_result = self.mask_head[-1].get_seg_masks(\n merged_masks, _bboxes[i], det_labels[i],\n rcnn_test_cfg, ori_shapes[i], scale_factors[i],\n rescale)\n segm_results.append(segm_result)\n ms_segm_result['ensemble'] = segm_results\n\n if self.with_mask:\n results = list(\n zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))\n else:\n results = ms_bbox_result['ensemble']\n\n return results\n\n def aug_test(self, features, proposal_list, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n rcnn_test_cfg = self.test_cfg\n aug_bboxes = []\n aug_scores = []\n for x, img_meta in zip(features, img_metas):\n # only one image in the batch\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n\n proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\n scale_factor, flip, flip_direction)\n # \"ms\" in variable names means multi-stage\n ms_scores = []\n\n rois = bbox2roi([proposals])\n for i in range(self.num_stages):\n bbox_results = self._bbox_forward(i, x, rois)\n ms_scores.append(bbox_results['cls_score'])\n\n if i < self.num_stages - 1:\n bbox_label = bbox_results['cls_score'][:, :-1].argmax(\n dim=1)\n rois = self.bbox_head[i].regress_by_class(\n rois, bbox_label, bbox_results['bbox_pred'],\n img_meta[0])\n\n cls_score = sum(ms_scores) / float(len(ms_scores))\n bboxes, scores = self.bbox_head[-1].get_bboxes(\n rois,\n cls_score,\n bbox_results['bbox_pred'],\n img_shape,\n scale_factor,\n rescale=False,\n cfg=None)\n aug_bboxes.append(bboxes)\n aug_scores.append(scores)\n\n # after merging, bboxes will be rescaled to the original image size\n merged_bboxes, merged_scores = merge_aug_bboxes(\n aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\n rcnn_test_cfg.score_thr,\n rcnn_test_cfg.nms,\n rcnn_test_cfg.max_per_img)\n\n bbox_result = bbox2result(det_bboxes, det_labels,\n self.bbox_head[-1].num_classes)\n\n if self.with_mask:\n if det_bboxes.shape[0] == 0:\n segm_result = [[]\n for _ in range(self.mask_head[-1].num_classes)]\n else:\n aug_masks = []\n aug_img_metas = []\n for x, img_meta in zip(features, img_metas):\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n scale_factor, flip, flip_direction)\n mask_rois = bbox2roi([_bboxes])\n for i in range(self.num_stages):\n mask_results = self._mask_forward(i, x, mask_rois)\n aug_masks.append(\n mask_results['mask_pred'].sigmoid().cpu().numpy())\n aug_img_metas.append(img_meta)\n merged_masks = merge_aug_masks(aug_masks, 
aug_img_metas,\n self.test_cfg)\n\n ori_shape = img_metas[0][0]['ori_shape']\n segm_result = self.mask_head[-1].get_seg_masks(\n merged_masks,\n det_bboxes,\n det_labels,\n rcnn_test_cfg,\n ori_shape,\n scale_factor=1.0,\n rescale=False)\n return [(bbox_result, segm_result)]\n else:\n return [bbox_result]" }, { "identifier": "BBoxTestMixin", "path": "PointOBB/mmdet/models/roi_heads/test_mixins.py", "snippet": "class BBoxTestMixin:\n\n if sys.version_info >= (3, 7):\n\n async def async_test_bboxes(self,\n x,\n img_metas,\n proposals,\n rcnn_test_cfg,\n rescale=False,\n **kwargs):\n \"\"\"Asynchronized test for box head without augmentation.\"\"\"\n rois = bbox2roi(proposals)\n roi_feats = self.bbox_roi_extractor(\n x[:len(self.bbox_roi_extractor.featmap_strides)], rois)\n if self.with_shared_head:\n roi_feats = self.shared_head(roi_feats)\n sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)\n\n async with completed(\n __name__, 'bbox_head_forward',\n sleep_interval=sleep_interval):\n cls_score, bbox_pred = self.bbox_head(roi_feats)\n\n img_shape = img_metas[0]['img_shape']\n scale_factor = img_metas[0]['scale_factor']\n det_bboxes, det_labels = self.bbox_head.get_bboxes(\n rois,\n cls_score,\n bbox_pred,\n img_shape,\n scale_factor,\n rescale=rescale,\n cfg=rcnn_test_cfg)\n return det_bboxes, det_labels\n\n def simple_test_bboxes(self,\n x,\n img_metas,\n proposals,\n rcnn_test_cfg,\n rescale=False):\n \"\"\"Test only det bboxes without augmentation.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n img_metas (list[dict]): Image meta info.\n proposals (List[Tensor]): Region proposals.\n rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n\n Returns:\n tuple[list[Tensor], list[Tensor]]: The first list contains\n the boxes of the corresponding image in a batch, each\n tensor has the shape (num_boxes, 5) and last dimension\n 5 represent (tl_x, tl_y, br_x, br_y, score). 
Each Tensor\n in the second list is the labels with shape (num_boxes, ).\n The length of both lists should be equal to batch_size.\n \"\"\"\n # get origin input shape to support onnx dynamic input shape\n\n img_shapes = tuple(meta['img_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n # The length of proposals of different batches may be different.\n # In order to form a batch, a padding operation is required.\n max_size = max([proposal.size(0) for proposal in proposals])\n # padding to form a batch\n for i, proposal in enumerate(proposals):\n supplement = proposal.new_full(\n (max_size - proposal.size(0), proposal.size(1)), 0)\n proposals[i] = torch.cat((supplement, proposal), dim=0)\n rois = torch.stack(proposals, dim=0)\n\n batch_index = torch.arange(\n rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(\n rois.size(0), rois.size(1), 1)\n rois = torch.cat([batch_index, rois[..., :4]], dim=-1)\n batch_size = rois.shape[0]\n num_proposals_per_img = rois.shape[1]\n\n # Eliminate the batch dimension\n rois = rois.view(-1, 5)\n bbox_results = self._bbox_forward(x, rois)\n cls_score = bbox_results['cls_score']\n bbox_pred = bbox_results['bbox_pred']\n\n # Recover the batch dimension\n rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))\n cls_score = cls_score.reshape(batch_size, num_proposals_per_img,\n cls_score.size(-1))\n\n # remove padding, ignore batch_index when calculating mask\n supplement_mask = rois.abs()[..., 1:].sum(dim=-1) == 0\n cls_score[supplement_mask, :] = 0\n\n # bbox_pred would be None in some detector when with_reg is False,\n # e.g. Grid R-CNN.\n if bbox_pred is not None:\n # the bbox prediction of some detectors like SABL is not Tensor\n if isinstance(bbox_pred, torch.Tensor):\n bbox_pred = bbox_pred.reshape(batch_size,\n num_proposals_per_img,\n bbox_pred.size(-1))\n bbox_pred[supplement_mask, :] = 0\n else:\n # TODO: Looking forward to a better way\n # TODO move these special process to a corresponding head\n # For SABL\n bbox_preds = self.bbox_head.bbox_pred_split(\n bbox_pred, num_proposals_per_img)\n # apply bbox post-processing to each image individually\n det_bboxes = []\n det_labels = []\n for i in range(len(proposals)):\n # remove padding\n supplement_mask = proposals[i].abs().sum(dim=-1) == 0\n for bbox in bbox_preds[i]:\n bbox[supplement_mask] = 0\n det_bbox, det_label = self.bbox_head.get_bboxes(\n rois[i],\n cls_score[i],\n bbox_preds[i],\n img_shapes[i],\n scale_factors[i],\n rescale=rescale,\n cfg=rcnn_test_cfg)\n det_bboxes.append(det_bbox)\n det_labels.append(det_label)\n return det_bboxes, det_labels\n else:\n bbox_pred = None\n\n return self.bbox_head.get_bboxes(\n rois,\n cls_score,\n bbox_pred,\n img_shapes,\n scale_factors,\n rescale=rescale,\n cfg=rcnn_test_cfg)\n\n def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):\n \"\"\"Test det bboxes with test time augmentation.\"\"\"\n aug_bboxes = []\n aug_scores = []\n for x, img_meta in zip(feats, img_metas):\n # only one image in the batch\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n # TODO more flexible\n proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\n scale_factor, flip, flip_direction, img_meta[0].get('tile_offset', None)) # add by hui\n rois = bbox2roi([proposals])\n bbox_results = self._bbox_forward(x, rois)\n bboxes, scores = self.bbox_head.get_bboxes(\n 
rois,\n bbox_results['cls_score'],\n bbox_results['bbox_pred'],\n img_shape,\n scale_factor,\n rescale=False,\n cfg=None)\n aug_bboxes.append(bboxes)\n aug_scores.append(scores)\n # after merging, bboxes will be rescaled to the original image size\n merged_bboxes, merged_scores = merge_aug_bboxes(\n aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\n rcnn_test_cfg.score_thr,\n rcnn_test_cfg.nms,\n rcnn_test_cfg.max_per_img)\n return det_bboxes, det_labels" }, { "identifier": "MaskTestMixin", "path": "PointOBB/mmdet/models/roi_heads/test_mixins.py", "snippet": "class MaskTestMixin:\n\n if sys.version_info >= (3, 7):\n\n async def async_test_mask(self,\n x,\n img_metas,\n det_bboxes,\n det_labels,\n rescale=False,\n mask_test_cfg=None):\n \"\"\"Asynchronized test for mask head without augmentation.\"\"\"\n # image shape of the first image in the batch (only one)\n ori_shape = img_metas[0]['ori_shape']\n scale_factor = img_metas[0]['scale_factor']\n if det_bboxes.shape[0] == 0:\n segm_result = [[] for _ in range(self.mask_head.num_classes)]\n else:\n if rescale:\n scale_factor = det_bboxes.new_tensor(scale_factor)\n _bboxes = (\n det_bboxes[:, :4] *\n scale_factor if rescale else det_bboxes)\n mask_rois = bbox2roi([_bboxes])\n mask_feats = self.mask_roi_extractor(\n x[:len(self.mask_roi_extractor.featmap_strides)],\n mask_rois)\n\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'):\n sleep_interval = mask_test_cfg['async_sleep_interval']\n else:\n sleep_interval = 0.035\n async with completed(\n __name__,\n 'mask_head_forward',\n sleep_interval=sleep_interval):\n mask_pred = self.mask_head(mask_feats)\n segm_result = self.mask_head.get_seg_masks(\n mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape,\n scale_factor, rescale)\n return segm_result\n\n def simple_test_mask(self,\n x,\n img_metas,\n det_bboxes,\n det_labels,\n rescale=False):\n \"\"\"Simple test for mask head without augmentation.\"\"\"\n # image shapes of images in the batch\n ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n segm_results = [[[] for _ in range(self.mask_head.num_classes)]\n for _ in range(len(det_bboxes))]\n return segm_results\n\n # The length of proposals of different batches may be different.\n # In order to form a batch, a padding operation is required.\n\n # padding to form a batch\n max_size = max([bboxes.size(0) for bboxes in det_bboxes])\n for i, (bbox, label) in enumerate(zip(det_bboxes, det_labels)):\n supplement_bbox = bbox.new_full(\n (max_size - bbox.size(0), bbox.size(1)), 0)\n supplement_label = label.new_full((max_size - label.size(0), ), 0)\n det_bboxes[i] = torch.cat((supplement_bbox, bbox), dim=0)\n det_labels[i] = torch.cat((supplement_label, label), dim=0)\n det_bboxes = torch.stack(det_bboxes, dim=0)\n det_labels = torch.stack(det_labels, dim=0)\n\n batch_size = det_bboxes.size(0)\n num_proposals_per_img = det_bboxes.shape[1]\n\n # if det_bboxes is rescaled to the original image size, we need to\n # rescale it back to the testing scale to obtain RoIs.\n det_bboxes = det_bboxes[..., :4]\n if rescale:\n scale_factors = det_bboxes.new_tensor(scale_factors)\n det_bboxes = det_bboxes * scale_factors.unsqueeze(1)\n\n batch_index = torch.arange(\n det_bboxes.size(0), 
device=det_bboxes.device).float().view(\n -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)\n mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)\n mask_rois = mask_rois.view(-1, 5)\n mask_results = self._mask_forward(x, mask_rois)\n mask_pred = mask_results['mask_pred']\n\n # Recover the batch dimension\n mask_preds = mask_pred.reshape(batch_size, num_proposals_per_img,\n *mask_pred.shape[1:])\n\n # apply mask post-processing to each image individually\n segm_results = []\n for i in range(batch_size):\n mask_pred = mask_preds[i]\n det_bbox = det_bboxes[i]\n det_label = det_labels[i]\n\n # remove padding\n supplement_mask = det_bbox.abs().sum(dim=-1) != 0\n mask_pred = mask_pred[supplement_mask]\n det_bbox = det_bbox[supplement_mask]\n det_label = det_label[supplement_mask]\n\n if det_label.shape[0] == 0:\n segm_results.append([[]\n for _ in range(self.mask_head.num_classes)\n ])\n else:\n segm_result = self.mask_head.get_seg_masks(\n mask_pred, det_bbox, det_label, self.test_cfg,\n ori_shapes[i], scale_factors[i], rescale)\n segm_results.append(segm_result)\n return segm_results\n\n def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):\n \"\"\"Test for mask head with test time augmentation.\"\"\"\n if det_bboxes.shape[0] == 0:\n segm_result = [[] for _ in range(self.mask_head.num_classes)]\n else:\n aug_masks = []\n for x, img_meta in zip(feats, img_metas):\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n scale_factor, flip, flip_direction, img_meta[0].get('tile_offset', None)) # add by hui\n mask_rois = bbox2roi([_bboxes])\n mask_results = self._mask_forward(x, mask_rois)\n # convert to numpy array to save memory\n aug_masks.append(\n mask_results['mask_pred'].sigmoid().cpu().numpy())\n merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)\n\n ori_shape = img_metas[0][0]['ori_shape']\n scale_factor = det_bboxes.new_ones(4)\n segm_result = self.mask_head.get_seg_masks(\n merged_masks,\n det_bboxes,\n det_labels,\n self.test_cfg,\n ori_shape,\n scale_factor=scale_factor,\n rescale=False)\n return segm_result" }, { "identifier": "obb2xyxy", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def obb2xyxy(rbboxes, version='oc'):\n \"\"\"Convert oriented bounding boxes to horizontal bounding boxes.\n\n Args:\n obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle]\n version (Str): angle representations.\n\n Returns:\n hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb]\n \"\"\"\n if version == 'oc':\n results = obb2xyxy_oc(rbboxes)\n elif version == 'le135':\n results = obb2xyxy_le135(rbboxes)\n elif version == 'le90':\n results = obb2xyxy_le90(rbboxes)\n else:\n raise NotImplementedError\n return results" }, { "identifier": "regularize_boxes", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def regularize_boxes(boxes,\n pattern: str = None,\n width_longer: bool = True,\n start_angle: float = -90) -> Tensor:\n \"\"\"Regularize rotated boxes.\n\n Due to the angle periodicity, one rotated box can be represented in\n many different (x, y, w, h, t). To make each rotated box unique,\n ``regularize_boxes`` will take the remainder of the angle divided by\n 180 degrees.\n\n However, after taking the remainder of the angle, there are still two\n representations for one rotate box. For example, (0, 0, 4, 5, 0.5) and\n (0, 0, 5, 4, 0.5 + pi/2) are the same areas in the image. 
To solve the\n problem, the code will swap edges w.r.t ``width_longer``:\n\n - width_longer=True: Make sure the width is longer than the height. If\n not, swap the width and height. The angle ranges in [start_angle,\n start_angle + 180). For the above example, the rotated box will be\n represented as (0, 0, 5, 4, 0.5 + pi/2).\n - width_longer=False: Make sure the angle is lower than\n start_angle+pi/2. If not, swap the width and height. The angle\n ranges in [start_angle, start_angle + 90). For the above example,\n the rotated box will be represented as (0, 0, 4, 5, 0.5).\n\n For convenience, three commonly used patterns are preset in\n ``regualrize_boxes``:\n\n - 'oc': OpenCV Definition. Has the same box representation as\n ``cv2.minAreaRect`` the angle ranges in [-90, 0). Equal to set\n width_longer=False and start_angle=-90.\n - 'le90': Long Edge Definition (90). the angle ranges in [-90, 90).\n The width is always longer than the height. Equal to set\n width_longer=True and start_angle=-90.\n - 'le135': Long Edge Definition (135). the angle ranges in [-45, 135).\n The width is always longer than the height. Equal to set\n width_longer=True and start_angle=-45.\n\n Args:\n pattern (str, Optional): Regularization pattern. Can only be 'oc',\n 'le90', or 'le135'. Defaults to None.\n width_longer (bool): Whether to make sure width is larger than\n height. Defaults to True.\n start_angle (float): The starting angle of the box angle\n represented in degrees. Defaults to -90.\n\n Returns:\n Tensor: Regularized box tensor.\n \"\"\"\n\n if pattern is not None:\n if pattern == 'oc':\n width_longer, start_angle = False, -90\n elif pattern == 'le90':\n width_longer, start_angle = True, -90\n elif pattern == 'le135':\n width_longer, start_angle = True, -45\n else:\n raise ValueError(\"pattern only can be 'oc', 'le90', and\"\n f\"'le135', but get {pattern}.\")\n start_angle = start_angle / 180 * np.pi\n\n x, y, w, h, t = boxes.unbind(dim=-1)\n if width_longer:\n # swap edge and angle if h >= w\n w_ = torch.where(w > h, w, h)\n h_ = torch.where(w > h, h, w)\n t = torch.where(w > h, t, t + np.pi / 2)\n t = ((t - start_angle) % np.pi) + start_angle\n else:\n # swap edge and angle if angle > pi/2\n t = ((t - start_angle) % np.pi)\n w_ = torch.where(t < np.pi / 2, w, h)\n h_ = torch.where(t < np.pi / 2, h, w)\n t = torch.where(t < np.pi / 2, t, t - np.pi / 2) + start_angle\n obb = torch.stack([x, y, w_, h_, t], dim=-1)\n return obb" }, { "identifier": "reduce_mean", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def reduce_mean(tensor):\n \"\"\"\"Obtain the mean of tensor on different GPUs.\"\"\"\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor" }, { "identifier": "obb2poly_np", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def obb2poly_np(rbboxes, version='oc'):\n \"\"\"Convert oriented bounding boxes to polygons.\n\n Args:\n obbs (ndarray): [x_ctr,y_ctr,w,h,angle]\n version (Str): angle representations.\n\n Returns:\n polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]\n \"\"\"\n if version == 'oc':\n results = obb2poly_np_oc(rbboxes)\n elif version == 'le135':\n results = obb2poly_np_le135(rbboxes)\n elif version == 'le90':\n results = obb2poly_np_le90(rbboxes)\n else:\n raise NotImplementedError\n return results" } ]
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
import copy
import numpy as np
import cv2
from mmdet.core import bbox2result, bbox2roi, rbbox2roi, build_assigner, build_sampler, multi_apply
from ..builder import HEADS, MODELS, build_head, build_roi_extractor, build_loss
from .standard_roi_head import StandardRoIHead
from .cascade_roi_head import CascadeRoIHead
from mmdet.core.bbox.iou_calculators import bbox_overlaps
from .test_mixins import BBoxTestMixin, MaskTestMixin
from mmdet.core.bbox import bbox_xyxy_to_cxcywh
from mmdet.core.bbox.transforms import rbbox2result
from mmcv.cnn import Scale, ConvModule
from mmcv.ops import box_iou_rotated
from typing import Any, List, Sequence, Tuple, Union
from torch import Tensor
from mmdet.models.utils.base_bbox_coder import BaseBBoxCoder
from ..detectors.utils import obb2xyxy, regularize_boxes, reduce_mean, obb2poly_np
15,667
# Dual-freq PSC for square-like problem if self.dual_freq: phase_targets = angle_targets * 4 phase_shift_targets += tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) return torch.cat(phase_shift_targets, axis=-1) def decode(self, angle_preds: Tensor, keepdim: bool = False) -> Tensor: """Phase-Shifting Decoder. Args: angle_preds (Tensor): The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) keepdim (bool): Whether the output tensor has dim retained or not. Returns: list[Tensor]: Angle offset for each scale level. Has shape (num_anchors * H * W, 1) when keepdim is true, (num_anchors * H * W) otherwise """ self.coef_sin = self.coef_sin.to(angle_preds) self.coef_cos = self.coef_cos.to(angle_preds) phase_sin = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase = -torch.atan2(phase_sin, phase_cos) # In range [-pi,pi) if self.dual_freq: phase_sin = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase2 = -torch.atan2(phase_sin, phase_cos) / 2 # Phase unwarpping, dual freq mixing # Angle between phase and phase2 is obtuse angle idx = torch.cos(phase) * torch.cos(phase2) + torch.sin( phase) * torch.sin(phase2) < 0 # Add pi to phase2 and keep it in range [-pi,pi) phase2[idx] = phase2[idx] % (2 * math.pi) - math.pi phase = phase2 # Set the angle of isotropic objects to zero phase[phase_mod < self.thr_mod] *= 0 angle_pred = phase / 2 return angle_pred @HEADS.register_module() class PointOBBHead(StandardRoIHead): """Simplest base roi head including one bbox head and one mask head.""" def __init__(self, bbox_roi_extractor, num_stages, bbox_head, top_k=7, with_atten=None, conv_cfg=None, norm_cfg=None, scale_angle: bool = True, stacked_convs = 4, loss_symmetry_ss=dict( type='SmoothL1Loss', loss_weight=1.0, beta=0.1), angle_coder=dict( type='PSCCoder', angle_version='le90', dual_freq=False, num_step=3, thr_mod=0), angle_version = 'le90', use_angle_loss = True, add_angle_pred_begin = False, not_use_rot_mil = False, detach_angle_head = False, rotation_agnostic_classes = None, agnostic_resize_classes = None, cls_scores_weight = 1.0, ins_scores_weight = 1.0, **kwargs): super(PointOBBHead, self).__init__(bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, **kwargs) self.threshold = 0.3 self.merge_mode = 'weighted_clsins' self.test_mean_iou = False # self.test_mean_iou = True self.sum_iou = 0 self.sum_num = 0 self.num_stages = num_stages self.topk1 = top_k # 7 self.topk2 = top_k # 7 self.featmap_strides = bbox_roi_extractor.featmap_strides self.with_atten = with_atten self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.in_channels=256 self.feat_channels=256 self.stacked_convs=stacked_convs self.is_scale_angle = scale_angle self.angle_coder = HEADS.build(angle_coder)
RangeType = Sequence[Tuple[int, int]] INF = 1e8 def meshgrid(x: Tensor, y: Tensor, row_major: bool = True) -> Tuple[Tensor, Tensor]: yy, xx = torch.meshgrid(y, x) if row_major: # warning .flatten() would cause error in ONNX exportingF # have to use reshape here return xx.reshape(-1), yy.reshape(-1) else: return yy.reshape(-1), xx.reshape(-1) def obb2cxcywh_le90(obboxes): """Convert oriented bounding boxes to horizontal bounding boxes. Args: obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle] Returns: hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb] """ center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1) Cos, Sin = torch.cos(theta), torch.sin(theta) x_bias = torch.abs(w / 2 * Cos) + torch.abs(h / 2 * Sin) y_bias = torch.abs(w / 2 * Sin) + torch.abs(h / 2 * Cos) bias = torch.cat([x_bias, y_bias], dim=-1) wh = bias * 2 return torch.cat([center, wh, torch.zeros_like(theta)], dim=-1) @HEADS.register_module() class PSCCoder(BaseBBoxCoder): """Phase-Shifting Coder. `Phase-Shifting Coder (PSC) <https://arxiv.org/abs/2211.06368>`. Args: angle_version (str): Angle definition. Only 'le90' is supported at present. dual_freq (bool, optional): Use dual frequency. Default: True. num_step (int, optional): Number of phase steps. Default: 3. thr_mod (float): Threshold of modulation. Default: 0.47. """ def __init__(self, angle_version: str, dual_freq: bool = True, num_step: int = 3, thr_mod: float = 0.47): super().__init__() self.angle_version = angle_version assert angle_version in ['le90'] self.dual_freq = dual_freq self.num_step = num_step self.thr_mod = thr_mod if self.dual_freq: self.encode_size = 2 * self.num_step else: self.encode_size = self.num_step self.coef_sin = torch.tensor( tuple( torch.sin(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) self.coef_cos = torch.tensor( tuple( torch.cos(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) def encode(self, angle_targets: Tensor) -> Tensor: """Phase-Shifting Encoder. Args: angle_targets (Tensor): Angle offset for each scale level. Has shape (num_anchors * H * W, 1) Returns: list[Tensor]: The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) """ phase_targets = angle_targets * 2 phase_shift_targets = tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) # Dual-freq PSC for square-like problem if self.dual_freq: phase_targets = angle_targets * 4 phase_shift_targets += tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) return torch.cat(phase_shift_targets, axis=-1) def decode(self, angle_preds: Tensor, keepdim: bool = False) -> Tensor: """Phase-Shifting Decoder. Args: angle_preds (Tensor): The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) keepdim (bool): Whether the output tensor has dim retained or not. Returns: list[Tensor]: Angle offset for each scale level. 
Has shape (num_anchors * H * W, 1) when keepdim is true, (num_anchors * H * W) otherwise """ self.coef_sin = self.coef_sin.to(angle_preds) self.coef_cos = self.coef_cos.to(angle_preds) phase_sin = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase = -torch.atan2(phase_sin, phase_cos) # In range [-pi,pi) if self.dual_freq: phase_sin = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase2 = -torch.atan2(phase_sin, phase_cos) / 2 # Phase unwarpping, dual freq mixing # Angle between phase and phase2 is obtuse angle idx = torch.cos(phase) * torch.cos(phase2) + torch.sin( phase) * torch.sin(phase2) < 0 # Add pi to phase2 and keep it in range [-pi,pi) phase2[idx] = phase2[idx] % (2 * math.pi) - math.pi phase = phase2 # Set the angle of isotropic objects to zero phase[phase_mod < self.thr_mod] *= 0 angle_pred = phase / 2 return angle_pred @HEADS.register_module() class PointOBBHead(StandardRoIHead): """Simplest base roi head including one bbox head and one mask head.""" def __init__(self, bbox_roi_extractor, num_stages, bbox_head, top_k=7, with_atten=None, conv_cfg=None, norm_cfg=None, scale_angle: bool = True, stacked_convs = 4, loss_symmetry_ss=dict( type='SmoothL1Loss', loss_weight=1.0, beta=0.1), angle_coder=dict( type='PSCCoder', angle_version='le90', dual_freq=False, num_step=3, thr_mod=0), angle_version = 'le90', use_angle_loss = True, add_angle_pred_begin = False, not_use_rot_mil = False, detach_angle_head = False, rotation_agnostic_classes = None, agnostic_resize_classes = None, cls_scores_weight = 1.0, ins_scores_weight = 1.0, **kwargs): super(PointOBBHead, self).__init__(bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, **kwargs) self.threshold = 0.3 self.merge_mode = 'weighted_clsins' self.test_mean_iou = False # self.test_mean_iou = True self.sum_iou = 0 self.sum_num = 0 self.num_stages = num_stages self.topk1 = top_k # 7 self.topk2 = top_k # 7 self.featmap_strides = bbox_roi_extractor.featmap_strides self.with_atten = with_atten self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.in_channels=256 self.feat_channels=256 self.stacked_convs=stacked_convs self.is_scale_angle = scale_angle self.angle_coder = HEADS.build(angle_coder)
self.loss_symmetry_ss = build_loss(loss_symmetry_ss)
4
2023-11-20 07:50:12+00:00
24k
ModelTC/EasyLLM
llm/models/hf_models/qwen_vl/modeling_qwen.py
[ { "identifier": "QWenConfig", "path": "llm/models/hf_models/qwen_vl/configuration_qwen.py", "snippet": "class QWenConfig(PretrainedConfig):\n model_type = \"qwen\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=151936,\n hidden_size=4096,\n num_hidden_layers=32,\n num_attention_heads=32,\n emb_dropout_prob=0.0,\n attn_dropout_prob=0.0,\n layer_norm_epsilon=1e-6,\n initializer_range=0.02,\n max_position_embeddings=8192,\n scale_attn_weights=True,\n use_cache=True,\n bf16=False,\n fp16=False,\n fp32=False,\n kv_channels=128,\n rotary_pct=1.0,\n rotary_emb_base=10000,\n use_dynamic_ntk=True,\n use_logn_attn=True,\n use_flash_attn=\"auto\",\n intermediate_size=22016,\n no_bias=True,\n tie_word_embeddings=False,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.emb_dropout_prob = emb_dropout_prob\n self.attn_dropout_prob = attn_dropout_prob\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_range = initializer_range\n self.scale_attn_weights = scale_attn_weights\n self.use_cache = use_cache\n self.max_position_embeddings = max_position_embeddings\n self.bf16 = bf16\n self.fp16 = fp16\n self.fp32 = fp32\n self.kv_channels = kv_channels\n self.rotary_pct = rotary_pct\n self.rotary_emb_base = rotary_emb_base\n self.use_dynamic_ntk = use_dynamic_ntk\n self.use_logn_attn = use_logn_attn\n self.use_flash_attn = use_flash_attn\n self.no_bias = no_bias\n super().__init__(\n tie_word_embeddings=tie_word_embeddings,\n **kwargs\n )" }, { "identifier": "make_context", "path": "llm/models/hf_models/qwen_vl/qwen_generation_utils.py", "snippet": "def make_context(\n tokenizer: PreTrainedTokenizer,\n query: str,\n history: List[Tuple[str, str]] = None,\n system: str = \"\",\n max_window_size: int = 6144,\n chat_format: str = \"chatml\",\n):\n if history is None:\n history = []\n\n if chat_format == \"chatml\":\n im_start, im_end = \"<|im_start|>\", \"<|im_end|>\"\n im_start_tokens = [tokenizer.im_start_id]\n im_end_tokens = [tokenizer.im_end_id]\n nl_tokens = tokenizer.encode(\"\\n\")\n\n def _tokenize_str(role, content):\n return f\"{role}\\n{content}\", tokenizer.encode(\n role, allowed_special=set(tokenizer.IMAGE_ST)\n ) + nl_tokens + tokenizer.encode(content, allowed_special=set(tokenizer.IMAGE_ST))\n\n system_text, system_tokens_part = _tokenize_str(\"system\", system)\n system_tokens = im_start_tokens + system_tokens_part + im_end_tokens\n\n raw_text = \"\"\n context_tokens = []\n\n for turn_query, turn_response in reversed(history):\n query_text, query_tokens_part = _tokenize_str(\"user\", turn_query)\n query_tokens = im_start_tokens + query_tokens_part + im_end_tokens\n if turn_response is not None:\n response_text, response_tokens_part = _tokenize_str(\n \"assistant\", turn_response\n )\n response_tokens = im_start_tokens + response_tokens_part + im_end_tokens\n\n next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens\n prev_chat = (\n f\"\\n{im_start}{query_text}{im_end}\\n{im_start}{response_text}{im_end}\"\n )\n else:\n next_context_tokens = nl_tokens + query_tokens + nl_tokens\n prev_chat = f\"\\n{im_start}{query_text}{im_end}\\n\"\n\n current_context_size = (\n len(system_tokens) + len(next_context_tokens) + len(context_tokens)\n )\n if current_context_size < max_window_size:\n context_tokens = next_context_tokens + context_tokens\n 
raw_text = prev_chat + raw_text\n else:\n break\n\n context_tokens = system_tokens + context_tokens\n raw_text = f\"{im_start}{system_text}{im_end}\" + raw_text\n context_tokens += (\n nl_tokens\n + im_start_tokens\n + _tokenize_str(\"user\", query)[1]\n + im_end_tokens\n + nl_tokens\n + im_start_tokens\n + tokenizer.encode(\"assistant\")\n + nl_tokens\n )\n raw_text += f\"\\n{im_start}user\\n{query}{im_end}\\n{im_start}assistant\\n\"\n\n elif chat_format == \"raw\":\n raw_text = query\n context_tokens = tokenizer.encode(raw_text)\n else:\n raise NotImplementedError(f\"Unknown chat format {chat_format!r}\")\n\n return raw_text, context_tokens" }, { "identifier": "HistoryType", "path": "llm/models/hf_models/qwen/qwen_generation_utils.py", "snippet": "def pad_batch(batch: BatchTokensType, pad_id: int, seq_length: int) -> BatchTokensType:\ndef get_ltor_masks_and_position_ids(\n data,\n eod_token,\n reset_position_ids,\n reset_attention_mask,\n eod_mask_loss,\n):\ndef get_batch(context_tokens: torch.LongTensor, eod_id: int):\ndef get_stop_words_ids(chat_format, tokenizer):\ndef make_context(\n tokenizer: PreTrainedTokenizer,\n query: str,\n history: List[Tuple[str, str]] = None,\n system: str = \"\",\n max_window_size: int = 6144,\n chat_format: str = \"chatml\",\n):\n def _tokenize_str(role, content):\ndef _decode_default(\n tokens: List[int],\n *,\n stop_words: List[str],\n eod_words: List[str],\n tokenizer: PreTrainedTokenizer,\n raw_text_len: int,\n verbose: bool = False,\n return_end_reason: bool = False,\n errors: str = 'replace',\n):\ndef _decode_chatml(\n tokens: List[int],\n *,\n stop_words: List[str],\n eod_token_ids: List[int],\n tokenizer: PreTrainedTokenizer,\n raw_text_len: int,\n context_length: int,\n verbose: bool = False,\n return_end_reason: bool = False,\n errors: str = 'replace'\n):\ndef decode_tokens(\n tokens: Union[torch.LongTensor, TokensType],\n tokenizer: PreTrainedTokenizer,\n raw_text_len: int,\n context_length: int,\n chat_format: str,\n verbose: bool = False,\n return_end_reason: bool = False,\n errors: str = \"replace\",\n) -> str:\n def __init__(self, stop_words_ids: Iterable[Iterable[int]], eos_token_id: int):\n def __call__(\n self, input_ids: torch.LongTensor, scores: torch.FloatTensor\n ) -> torch.FloatTensor:\n def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool:\n def _calc_stopped_samples(self, prev_input_ids: Iterable[int]) -> Iterable[int]:\ndef top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float(\"Inf\")):\ndef switch(val1, val2, boolean):\nclass StopWordsLogitsProcessor(LogitsProcessor):" }, { "identifier": "VisionTransformer", "path": "llm/models/hf_models/qwen_vl/visual.py", "snippet": "class VisionTransformer(nn.Module):\n\n def __init__(\n self,\n image_size: int,\n patch_size: int,\n width: int,\n layers: int,\n heads: int,\n mlp_ratio: float,\n n_queries: int = 256,\n output_dim: int = 512,\n **kwargs\n ):\n super().__init__()\n image_height, image_width = self.image_size = (image_size, image_size)\n patch_height, patch_width = self.patch_size = (patch_size, patch_size)\n self.grid_size = (image_height // patch_height, image_width // patch_width)\n self.output_dim = output_dim\n\n mean = (0.48145466, 0.4578275, 0.40821073)\n std = (0.26862954, 0.26130258, 0.27577711)\n self.image_transform = transforms.Compose([\n transforms.Resize(\n (image_size, image_size),\n interpolation=InterpolationMode.BICUBIC\n ),\n transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std),\n ])\n\n self.conv1 = 
nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n # class embeddings and positional embeddings\n scale = width ** -0.5\n self.positional_embedding = nn.Parameter(scale * torch.randn(256, width))\n\n norm_layer = partial(nn.LayerNorm, eps=1e-6)\n act_layer = nn.GELU\n\n self.ln_pre = norm_layer(width)\n self.transformer = TransformerBlock(\n width,\n layers,\n heads,\n mlp_ratio,\n act_layer=act_layer,\n norm_layer=norm_layer,\n )\n\n self.attn_pool = Resampler(\n grid_size=int(math.sqrt(n_queries)),\n embed_dim=output_dim,\n num_heads=output_dim // 128,\n kv_dim=width,\n norm_layer=norm_layer,\n )\n self.ln_post = norm_layer(output_dim)\n self.proj = nn.Parameter((output_dim ** -0.5) * torch.randn(output_dim, output_dim))\n\n def forward(self, x: torch.Tensor):\n x = x.to(\n dtype=self.transformer.get_cast_dtype(),\n device=self.transformer.get_cast_device(),\n )\n # to patches\n x = self.conv1(x) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n x = x + get_abs_pos(self.positional_embedding, x.size(1))\n\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n x = self.attn_pool(x)\n x = self.ln_post(x)\n x = x @ self.proj\n\n return x\n\n def encode(self, image_paths: List[str]):\n images = []\n for image_path in image_paths:\n if image_path.startswith(\"http://\") or image_path.startswith(\"https://\"):\n image = Image.open(requests.get(image_path, stream=True).raw)\n else:\n image = Image.open(image_path)\n image = image.convert(\"RGB\")\n images.append(self.image_transform(image))\n images = torch.stack(images, dim=0)\n return self(images)" }, { "identifier": "RMSNorm", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class RMSNorm(torch.nn.Module):\n def __init__(self, dim: int, eps: float = 1e-6):\n super().__init__()\n self.eps = eps\n self.weight = nn.Parameter(torch.ones(dim))\n\n def _norm(self, x):\n return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)\n\n def forward(self, x):\n if rms_norm is not None and x.is_cuda:\n return rms_norm(x, self.weight, self.eps)\n else:\n output = self._norm(x.float()).type_as(x)\n return output * self.weight" }, { "identifier": "apply_rotary_pos_emb", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "def apply_rotary_pos_emb(t, freqs):\n cos, sin = freqs\n if apply_rotary_emb_func is not None and t.is_cuda:\n t_ = t.float()\n cos = cos.squeeze(0).squeeze(1)[:, : cos.shape[-1] // 2]\n sin = sin.squeeze(0).squeeze(1)[:, : sin.shape[-1] // 2]\n output = apply_rotary_emb_func(t_, cos, sin).type_as(t)\n return output\n else:\n rot_dim = freqs[0].shape[-1]\n cos, sin = freqs\n t_, t_pass_ = t[..., :rot_dim], t[..., rot_dim:]\n t_ = t_.float()\n t_pass_ = t_pass_.float()\n t_ = (t_ * cos) + (_rotate_half(t_) * sin)\n return torch.cat((t_, t_pass_), dim=-1).type_as(t)" }, { "identifier": "QWenMLP", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class QWenMLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.w1 = nn.Linear(\n config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias\n )\n self.w2 = nn.Linear(\n config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias\n )\n ff_dim_in = config.intermediate_size // 2\n self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not 
config.no_bias)\n\n def forward(self, hidden_states):\n a1 = self.w1(hidden_states)\n a2 = self.w2(hidden_states)\n intermediate_parallel = a1 * F.silu(a2)\n output = self.c_proj(intermediate_parallel)\n return output" }, { "identifier": "QWenAttention", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class QWenAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.register_buffer(\"masked_bias\", torch.tensor(-1e4), persistent=False)\n self.seq_length = config.seq_length\n\n self.hidden_size = config.hidden_size\n self.split_size = config.hidden_size\n self.num_heads = config.num_attention_heads\n self.head_dim = self.hidden_size // self.num_heads\n\n self.use_flash_attn = config.use_flash_attn\n self.scale_attn_weights = True\n\n self.projection_size = config.kv_channels * config.num_attention_heads\n\n assert self.projection_size % config.num_attention_heads == 0\n self.hidden_size_per_attention_head = (\n self.projection_size // config.num_attention_heads\n )\n\n self.c_attn = nn.Linear(config.hidden_size, 3 * self.projection_size)\n\n self.c_proj = nn.Linear(\n config.hidden_size, self.projection_size, bias=not config.no_bias\n )\n\n self.is_fp32 = not (config.bf16 or config.fp16)\n if (\n self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and not self.is_fp32\n ):\n self.core_attention_flash = FlashSelfAttention(\n causal=True, attention_dropout=config.attn_dropout_prob\n )\n self.bf16 = config.bf16\n\n self.use_dynamic_ntk = config.use_dynamic_ntk\n self.use_logn_attn = config.use_logn_attn\n\n logn_list = [\n math.log(i, self.seq_length) if i > self.seq_length else 1\n for i in range(1, 32768)\n ]\n logn_tensor = torch.tensor(logn_list)[None, :, None, None]\n self.register_buffer(\"logn_tensor\", logn_tensor, persistent=False)\n\n self.attn_dropout = nn.Dropout(config.attn_dropout_prob)\n self.softmax_in_fp32 = config.softmax_in_fp32 if hasattr(config, 'softmax_in_fp32') else False\n self.use_cache_quantization = config.use_cache_quantization if hasattr(\n config, 'use_cache_quantization') else False\n self.use_cache_kernel = config.use_cache_kernel if hasattr(config, 'use_cache_kernel') else False\n cache_dtype = torch.float\n if self.bf16:\n cache_dtype = torch.bfloat16\n elif config.fp16:\n cache_dtype = torch.float16\n self.cache_qmax = torch.tensor(torch.iinfo(torch.uint8).max, dtype=cache_dtype)\n self.cache_qmin = torch.tensor(torch.iinfo(torch.uint8).min, dtype=cache_dtype)\n\n if config.use_cache_quantization and config.use_cache_kernel:\n try:\n from .cpp_kernels import cache_autogptq_cuda_256\n self.cache_kernels = cache_autogptq_cuda_256\n except ImportError:\n self.cache_kernels = None\n\n def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None):\n device = query.device\n if self.use_cache_quantization:\n qk, qk_scale, qk_zero = key\n if self.use_cache_kernel and self.cache_kernels is not None:\n shape = query.shape[:-1] + (qk.shape[-2],)\n attn_weights = torch.zeros(shape, dtype=torch.float16, device=device)\n self.cache_kernels.vecquant8matmul_batched_faster_old(\n query.contiguous() if query.dtype == torch.float16 else query.to(torch.float16).contiguous(),\n qk.transpose(-1, -2).contiguous(),\n attn_weights,\n qk_scale.contiguous() if qk_scale.dtype == torch.float16 else qk_scale.to(torch.float16).contiguous(),\n qk_zero.contiguous()if qk_zero.dtype == torch.float16 else qk_zero.to(torch.float16).contiguous())\n # attn_weights = 
attn_weights.to(query.dtype).contiguous()\n else:\n key = dequantize_cache_torch(qk, qk_scale, qk_zero)\n attn_weights = torch.matmul(query, key.transpose(-1, -2))\n else:\n attn_weights = torch.matmul(query, key.transpose(-1, -2))\n\n if self.scale_attn_weights:\n if self.use_cache_quantization:\n size_temp = value[0].size(-1)\n else:\n size_temp = value.size(-1)\n attn_weights = attn_weights / torch.full(\n [],\n size_temp ** 0.5,\n dtype=attn_weights.dtype,\n device=attn_weights.device,\n )\n if self.use_cache_quantization:\n query_length, key_length = query.size(-2), key[0].size(-2)\n else:\n query_length, key_length = query.size(-2), key.size(-2)\n causal_mask = registered_causal_mask[\n :, :, key_length - query_length: key_length, :key_length\n ]\n mask_value = torch.finfo(attn_weights.dtype).min\n mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to(\n attn_weights.device\n )\n attn_weights = torch.where(\n causal_mask, attn_weights.to(attn_weights.dtype), mask_value\n )\n\n if attention_mask is not None:\n attn_weights = attn_weights + attention_mask\n\n if self.softmax_in_fp32:\n attn_weights = nn.functional.softmax(attn_weights.float(), dim=-1)\n else:\n attn_weights = nn.functional.softmax(attn_weights, dim=-1)\n\n attn_weights = attn_weights.type(query.dtype)\n attn_weights = self.attn_dropout(attn_weights)\n\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n if self.use_cache_quantization:\n qv, qv_scale, qv_zero = value\n if self.use_cache_kernel and self.cache_kernels is not None:\n shape = attn_weights.shape[:-1] + (query.shape[-1],)\n attn_output = torch.zeros(shape, dtype=torch.float16, device=device)\n self.cache_kernels.vecquant8matmul_batched_column_compression_faster_old(\n attn_weights.contiguous() if attn_weights.dtype == torch.float16 else attn_weights.to(torch.float16).contiguous(),\n qv.contiguous(), # dtype: int32\n attn_output,\n qv_scale.contiguous() if qv_scale.dtype == torch.float16 else qv_scale.to(torch.float16).contiguous(),\n qv_zero.contiguous() if qv_zero.dtype == torch.float16 else qv_zero.to(torch.float16).contiguous())\n if attn_output.dtype != query.dtype:\n attn_output = attn_output.to(query.dtype)\n attn_weights = attn_weights.to(query.dtype)\n else:\n value = dequantize_cache_torch(qv, qv_scale, qv_zero)\n attn_output = torch.matmul(attn_weights, value)\n else:\n attn_output = torch.matmul(attn_weights, value)\n\n attn_output = attn_output.transpose(1, 2)\n\n return attn_output, attn_weights\n\n def _upcast_and_reordered_attn(\n self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None\n ):\n bsz, num_heads, q_seq_len, dk = query.size()\n _, _, k_seq_len, _ = key.size()\n\n attn_weights = torch.empty(\n bsz * num_heads,\n q_seq_len,\n k_seq_len,\n dtype=torch.float32,\n device=query.device,\n )\n\n scale_factor = 1.0\n if self.scale_attn_weights:\n scale_factor /= float(value.size(-1)) ** 0.5\n\n with autocast(enabled=False):\n q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(\n -1, dk, k_seq_len\n )\n attn_weights = torch.baddbmm(\n attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor\n )\n attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)\n\n query_length, key_length = query.size(-2), key.size(-2)\n causal_mask = registered_causal_mask[\n :, :, key_length - query_length: key_length, :key_length\n ]\n mask_value = torch.finfo(attn_weights.dtype).min\n mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(\n 
attn_weights.device\n )\n attn_weights = torch.where(causal_mask, attn_weights, mask_value)\n\n if attention_mask is not None:\n attn_weights = attn_weights + attention_mask\n\n attn_weights = nn.functional.softmax(attn_weights, dim=-1)\n\n if attn_weights.dtype != torch.float32:\n raise RuntimeError(\n \"Error with upcasting, attn_weights does not have dtype torch.float32\"\n )\n attn_weights = attn_weights.type(value.dtype)\n attn_weights = self.attn_dropout(attn_weights)\n\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n attn_output = torch.matmul(attn_weights, value)\n\n return attn_output, attn_weights\n\n def _split_heads(self, tensor, num_heads, attn_head_size):\n new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)\n tensor = tensor.view(new_shape)\n return tensor\n\n def _merge_heads(self, tensor, num_heads, attn_head_size):\n tensor = tensor.contiguous()\n new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)\n return tensor.view(new_shape)\n\n def forward(\n self,\n hidden_states: Optional[Tuple[torch.FloatTensor]],\n rotary_pos_emb_list: Optional[List[List[torch.Tensor]]] = None,\n registered_causal_mask: Optional[torch.Tensor] = None,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = False,\n ):\n mixed_x_layer = self.c_attn(hidden_states)\n\n query, key, value = mixed_x_layer.split(self.split_size, dim=2)\n\n query = self._split_heads(query, self.num_heads, self.head_dim)\n key = self._split_heads(key, self.num_heads, self.head_dim)\n value = self._split_heads(value, self.num_heads, self.head_dim)\n\n if rotary_pos_emb_list is not None:\n cur_len = query.shape[1]\n if len(rotary_pos_emb_list) == 1:\n rotary_pos_emb = rotary_pos_emb_list[0]\n rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]\n rotary_pos_emb = (rotary_pos_emb,) * 2\n q_pos_emb, k_pos_emb = rotary_pos_emb\n # Slice the pos emb for current inference\n query = apply_rotary_pos_emb(query, q_pos_emb)\n key = apply_rotary_pos_emb(key, k_pos_emb)\n else:\n query_list = []\n key_list = []\n for i, rotary_pos_emb in enumerate(rotary_pos_emb_list):\n rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]\n rotary_pos_emb = (rotary_pos_emb,) * 2\n q_pos_emb, k_pos_emb = rotary_pos_emb\n # Slice the pos emb for current inference\n query_list += [apply_rotary_pos_emb(query[i:i + 1, :, :], q_pos_emb)]\n key_list += [apply_rotary_pos_emb(key[i:i + 1, :, :], k_pos_emb)]\n query = torch.cat(query_list, dim=0)\n key = torch.cat(key_list, dim=0)\n\n if self.use_cache_quantization:\n key = quantize_cache_v(key.permute(0, 2, 1, 3),\n bits=8,\n qmin=self.cache_qmin,\n qmax=self.cache_qmax)\n value = quantize_cache_v(value.permute(0, 2, 1, 3),\n bits=8,\n qmin=self.cache_qmin,\n qmax=self.cache_qmax)\n\n if layer_past is not None:\n past_key, past_value = layer_past[0], layer_past[1]\n if self.use_cache_quantization:\n # use_cache_quantization:\n # present=((q_key,key_scale,key_zero_point),\n # (q_value,value_scale,value_zero_point))\n key = (torch.cat((past_key[0], key[0]), dim=2),\n torch.cat((past_key[1], key[1]), dim=2),\n torch.cat((past_key[2], key[2]), dim=2))\n value = (torch.cat((past_value[0], value[0]), dim=2),\n torch.cat((past_value[1], value[1]), dim=2),\n 
torch.cat((past_value[2], value[2]), dim=2))\n else:\n # not use_cache_quantization:\n # present=(key,value)\n key = torch.cat((past_key, key), dim=1)\n value = torch.cat((past_value, value), dim=1)\n\n if use_cache:\n present = (key, value)\n else:\n present = None\n\n if self.use_logn_attn and not self.training:\n if self.use_cache_quantization:\n seq_start = key[0].size(2) - query.size(1)\n seq_end = key[0].size(2)\n else:\n seq_start = key.size(1) - query.size(1)\n seq_end = key.size(1)\n logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :].type_as(query)\n query = query * logn_tensor.expand_as(query)\n\n if (\n self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and not self.is_fp32\n and query.is_cuda\n ):\n q, k, v = query, key, value\n attn_output = self.core_attention_flash(q, k, v, attention_mask=attention_mask)\n else:\n query = query.permute(0, 2, 1, 3)\n if not self.use_cache_quantization:\n key = key.permute(0, 2, 1, 3)\n value = value.permute(0, 2, 1, 3)\n if (\n registered_causal_mask is None\n and self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and not self.is_fp32\n and not query.is_cuda\n ):\n raise Exception(_ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED)\n\n if not self.use_cache_quantization and SUPPORT_TORCH2:\n causal_mask = registered_causal_mask[\n :, :, key.size(-2) - query.size(-2): key.size(-2), :key.size(-2)\n ]\n if attention_mask is not None:\n attention_mask = attention_mask.expand(\n -1, -1, causal_mask.size(2), -1\n ).masked_fill(~causal_mask, torch.finfo(query.dtype).min)\n else:\n attention_mask = causal_mask\n attn_output = F.scaled_dot_product_attention(\n query, key, value, attn_mask=attention_mask\n ).transpose(1, 2)\n attn_weight = None\n else:\n attn_output, attn_weight = self._attn(\n query, key, value, registered_causal_mask, attention_mask, head_mask\n )\n context_layer = self._merge_heads(\n attn_output, self.num_heads, self.head_dim\n )\n\n attn_output = self.c_proj(context_layer)\n\n outputs = (attn_output, present)\n if output_attentions:\n if (\n self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and not self.is_fp32\n ):\n raise ValueError(\"Cannot output attentions while using flash-attn\")\n else:\n outputs += (attn_weight,)\n\n return outputs" }, { "identifier": "QWenModel", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class QWenModel(QWenPreTrainedModel):\n _keys_to_ignore_on_load_missing = [\"attn.masked_bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.vocab_size = config.vocab_size\n self.num_hidden_layers = config.num_hidden_layers\n self.embed_dim = config.hidden_size\n self.use_cache_quantization = self.config.use_cache_quantization if hasattr(\n self.config, 'use_cache_quantization') else False\n\n self.gradient_checkpointing = False\n self.use_dynamic_ntk = config.use_dynamic_ntk\n self.seq_length = config.seq_length\n\n self.wte = nn.Embedding(self.vocab_size, self.embed_dim)\n\n self.drop = nn.Dropout(config.emb_dropout_prob)\n\n if config.rotary_pct == 1.0:\n self.rotary_ndims = None\n else:\n assert config.rotary_pct < 1\n self.rotary_ndims = int(\n config.kv_channels * config.rotary_pct\n )\n dim = (\n self.rotary_ndims\n if self.rotary_ndims is not None\n else config.kv_channels\n )\n self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base)\n\n self.use_flash_attn = config.use_flash_attn\n self.is_fp32 = not (config.bf16 or config.fp16)\n if (\n self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and 
not self.is_fp32\n ):\n self.registered_causal_mask = None\n else:\n max_positions = config.max_position_embeddings\n self.register_buffer(\n \"registered_causal_mask\",\n torch.tril(\n torch.ones((max_positions, max_positions), dtype=torch.bool)\n ).view(1, 1, max_positions, max_positions),\n persistent=False,\n )\n\n self.h = nn.ModuleList(\n [\n QWenBlock(\n config\n )\n for i in range(config.num_hidden_layers)\n ]\n )\n self.ln_f = RMSNorm(\n self.embed_dim,\n eps=config.layer_norm_epsilon,\n )\n\n self.post_init()\n\n def get_input_embeddings(self):\n return self.wte\n\n def set_input_embeddings(self, new_embeddings):\n self.wte = new_embeddings\n\n def get_ntk_alpha(self, true_seq_len):\n context_value = math.log(true_seq_len / self.seq_length, 2) + 1\n ntk_alpha = 2 ** math.ceil(context_value) - 1\n ntk_alpha = max(ntk_alpha, 1)\n return ntk_alpha\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ):\n output_attentions = (\n output_attentions\n if output_attentions is not None\n else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\n \"You cannot specify both input_ids and inputs_embeds at the same time\"\n )\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n batch_size = input_ids.shape[0]\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size = inputs_embeds.shape[0]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, input_shape[-1])\n if position_ids is not None:\n position_ids = position_ids.view(-1, input_shape[-1])\n\n if past_key_values is None:\n past_length = 0\n past_key_values = tuple([None] * len(self.h))\n else:\n if self.use_cache_quantization:\n past_length = past_key_values[0][0][0].size(2)\n else:\n past_length = past_key_values[0][0].size(-2)\n if position_ids is None:\n position_ids = torch.arange(\n past_length,\n input_shape[-1] + past_length,\n dtype=torch.long,\n device=device,\n )\n position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])\n\n if attention_mask is not None:\n if batch_size <= 0:\n raise ValueError(\"batch_size has to be defined and > 0\")\n attention_mask = attention_mask.view(batch_size, -1)\n attention_mask = attention_mask[:, None, None, :]\n attention_mask = attention_mask.to(dtype=self.dtype)\n attention_mask = (1.0 - attention_mask) * 
torch.finfo(self.dtype).min\n\n encoder_attention_mask = None\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n if inputs_embeds is None:\n inputs_embeds = self.wte(input_ids)\n hidden_states = inputs_embeds\n\n kv_seq_len = hidden_states.size()[1]\n if past_key_values[0] is not None:\n # past key values[0][0] shape: bs * seq_len * head_num * dim\n if self.use_cache_quantization:\n kv_seq_len += past_key_values[0][0][0].shape[2]\n else:\n kv_seq_len += past_key_values[0][0].shape[1]\n\n if self.training or not self.use_dynamic_ntk:\n ntk_alpha_list = [1.0]\n elif kv_seq_len != hidden_states.size()[1]:\n ntk_alpha_list = self.rotary_emb._ntk_alpha_cached_list\n else:\n ntk_alpha_list = []\n if attention_mask is not None and kv_seq_len > self.seq_length:\n true_seq_lens = attention_mask.squeeze(1).squeeze(1).eq(0).sum(dim=-1, dtype=torch.int32)\n for i in range(hidden_states.size()[0]):\n true_seq_len = true_seq_lens[i].item()\n ntk_alpha = self.get_ntk_alpha(true_seq_len)\n ntk_alpha_list.append(ntk_alpha)\n else:\n ntk_alpha = self.get_ntk_alpha(kv_seq_len)\n ntk_alpha_list.append(ntk_alpha)\n self.rotary_emb._ntk_alpha_cached_list = ntk_alpha_list\n rotary_pos_emb_list = [\n self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha) for ntk_alpha in ntk_alpha_list\n ]\n\n hidden_states = self.drop(hidden_states)\n output_shape = input_shape + (hidden_states.size(-1),)\n\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning_once(\n \"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...\"\n )\n use_cache = False\n\n presents = () if use_cache else None\n all_self_attentions = () if output_attentions else None\n all_hidden_states = () if output_hidden_states else None\n for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for past_key_value\n return module(*inputs, use_cache, output_attentions)\n\n return custom_forward\n\n outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(block),\n hidden_states,\n rotary_pos_emb_list,\n self.registered_causal_mask,\n None,\n attention_mask,\n head_mask[i],\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n outputs = block(\n hidden_states,\n layer_past=layer_past,\n rotary_pos_emb_list=rotary_pos_emb_list,\n registered_causal_mask=self.registered_causal_mask,\n attention_mask=attention_mask,\n head_mask=head_mask[i],\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n\n hidden_states = outputs[0]\n if use_cache is True:\n presents = presents + (outputs[1],)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)\n\n hidden_states = self.ln_f(hidden_states)\n hidden_states = hidden_states.view(output_shape)\n # Add last hidden state\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v for v in [hidden_states, presents, all_hidden_states] if v is not None\n )\n\n return BaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )" }, { "identifier": "QWenLMHeadModel", 
"path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class QWenLMHeadModel(QWenPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.rotary_emb\\.inv_freq\"]\n _keys_to_ignore_on_load_unexpected = [r\"h\\.\\d+\\.attn\\.masked_bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n assert (\n config.bf16 + config.fp16 + config.fp32 <= 1\n ), \"Only one of \\\"bf16\\\", \\\"fp16\\\", \\\"fp32\\\" can be true\"\n logger.warn(\n \"Warning: please make sure that you are using the latest codes and checkpoints, \"\n \"especially if you used Qwen-7B before 09.25.2023.\"\n \"请使用最新模型和代码,尤其如果你在9月25日前已经开始使用Qwen-7B,千万注意不要使用错误代码和模型。\"\n )\n\n autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0\n\n if autoset_precision:\n if SUPPORT_BF16:\n logger.warn(\n \"The model is automatically converting to bf16 for faster inference. \"\n \"If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \\\"AutoModelForCausalLM.from_pretrained\\\".\" # noqa\n )\n config.bf16 = True\n elif SUPPORT_FP16:\n logger.warn(\n \"The model is automatically converting to fp16 for faster inference. \"\n \"If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \\\"AutoModelForCausalLM.from_pretrained\\\".\" # noqa\n )\n config.fp16 = True\n else:\n config.fp32 = True\n\n if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:\n logger.warn(\n \"Your device does NOT seem to support bf16, you can switch to fp16 or fp32 by by passing fp16/fp32=True in \\\"AutoModelForCausalLM.from_pretrained\\\".\") # noqa\n if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:\n logger.warn(\n \"Your device does NOT support faster inference with fp16, please switch to fp32 which is likely to be faster\")\n if config.fp32:\n if SUPPORT_BF16:\n logger.warn(\n \"Your device support faster inference by passing bf16=True in \\\"AutoModelForCausalLM.from_pretrained\\\".\")\n elif SUPPORT_FP16:\n logger.warn(\n \"Your device support faster inference by passing fp16=True in \\\"AutoModelForCausalLM.from_pretrained\\\".\")\n\n if config.use_flash_attn == \"auto\":\n if config.bf16 or config.fp16:\n logger.warn(\"Try importing flash-attention for faster inference...\")\n config.use_flash_attn = True\n else:\n config.use_flash_attn = False\n if config.use_flash_attn and config.fp32:\n logger.warn(\"Flash attention will be disabled because it does NOT support fp32.\")\n\n if config.use_flash_attn:\n _import_flash_attn()\n\n self.transformer = QWenModel(config)\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n if config.bf16:\n self.transformer.bfloat16()\n self.lm_head.bfloat16()\n if config.fp16:\n self.transformer.half()\n self.lm_head.half()\n self.post_init()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs\n ):\n token_type_ids = kwargs.get(\"token_type_ids\", None)\n if past_key_values:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n if token_type_ids is not None:\n token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n attention_mask = kwargs.get(\"attention_mask\", None)\n position_ids = kwargs.get(\"position_ids\", None)\n\n if attention_mask is not None and position_ids is None:\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if 
past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n else:\n position_ids = None\n\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n )\n return model_inputs\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n transformer_outputs = self.transformer(\n input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n\n lm_logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n labels = labels.to(lm_logits.device)\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(\n shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)\n )\n\n if not return_dict:\n output = (lm_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=lm_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n @staticmethod\n def _reorder_cache(\n past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor\n ) -> Tuple[Tuple[torch.Tensor]]:\n\n return tuple(\n tuple(\n past_state.index_select(0, beam_idx.to(past_state.device))\n for past_state in layer_past\n )\n for layer_past in past_key_values\n )\n\n def chat(\n self,\n tokenizer: PreTrainedTokenizer,\n query: str,\n history: Optional[HistoryType],\n system: str = \"You are a helpful assistant.\",\n append_history: bool = True,\n stream: Optional[bool] = _SENTINEL,\n stop_words_ids: Optional[List[List[int]]] = None,\n generation_config: Optional[GenerationConfig] = None,\n **kwargs,\n ) -> Tuple[str, HistoryType]:\n generation_config = generation_config if generation_config is not None else self.generation_config\n\n assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT\n assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT\n if history is None:\n history = 
[]\n if stop_words_ids is None:\n stop_words_ids = []\n\n max_window_size = kwargs.get('max_window_size', None)\n if max_window_size is None:\n max_window_size = generation_config.max_window_size\n raw_text, context_tokens = make_context(\n tokenizer,\n query,\n history=history,\n system=system,\n max_window_size=max_window_size,\n chat_format=generation_config.chat_format,\n )\n\n stop_words_ids.extend(get_stop_words_ids(\n generation_config.chat_format, tokenizer\n ))\n input_ids = torch.tensor([context_tokens]).to(self.device)\n outputs = self.generate(\n input_ids,\n stop_words_ids=stop_words_ids,\n return_dict_in_generate=False,\n generation_config=generation_config,\n **kwargs,\n )\n\n response = decode_tokens(\n outputs[0],\n tokenizer,\n raw_text_len=len(raw_text),\n context_length=len(context_tokens),\n chat_format=generation_config.chat_format,\n verbose=False,\n errors='replace'\n )\n\n if append_history:\n history.append((query, response))\n\n return response, history\n\n def chat_stream(\n self,\n tokenizer: PreTrainedTokenizer,\n query: str,\n history: Optional[HistoryType],\n system: str = \"You are a helpful assistant.\",\n stop_words_ids: Optional[List[List[int]]] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n generation_config: Optional[GenerationConfig] = None,\n **kwargs,\n ) -> Generator[str, Any, None]:\n generation_config = generation_config if generation_config is not None else self.generation_config\n assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT\n if history is None:\n history = []\n if stop_words_ids is None:\n stop_words_ids = []\n\n max_window_size = kwargs.get('max_window_size', None)\n if max_window_size is None:\n max_window_size = generation_config.max_window_size\n raw_text, context_tokens = make_context(\n tokenizer,\n query,\n history=history,\n system=system,\n max_window_size=max_window_size,\n chat_format=generation_config.chat_format,\n )\n\n stop_words_ids.extend(get_stop_words_ids(\n generation_config.chat_format, tokenizer\n ))\n if stop_words_ids is not None:\n stop_words_logits_processor = StopWordsLogitsProcessor(\n stop_words_ids=stop_words_ids,\n eos_token_id=generation_config.eos_token_id,\n )\n if logits_processor is None:\n logits_processor = LogitsProcessorList([stop_words_logits_processor])\n else:\n logits_processor.append(stop_words_logits_processor)\n input_ids = torch.tensor([context_tokens]).to(self.device)\n\n from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig\n self.__class__.generate_stream = NewGenerationMixin.generate\n self.__class__.sample_stream = NewGenerationMixin.sample_stream\n stream_config = StreamGenerationConfig(**generation_config.to_dict(), do_stream=True)\n\n def stream_generator():\n outputs = []\n for token in self.generate_stream(\n input_ids,\n return_dict_in_generate=False,\n generation_config=stream_config,\n logits_processor=logits_processor,\n seed=-1,\n **kwargs):\n outputs.append(token.item())\n yield tokenizer.decode(outputs, skip_special_tokens=True, errors='ignore')\n\n return stream_generator()\n\n def generate(\n self,\n inputs: Optional[torch.Tensor] = None,\n generation_config: Optional[GenerationConfig] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n prefix_allowed_tokens_fn: Optional[\n Callable[[int, torch.Tensor], List[int]]\n ] = None,\n synced_gpus: Optional[bool] = None,\n assistant_model: Optional[\"PreTrainedModel\"] = 
None,\n streamer: Optional[\"BaseStreamer\"] = None,\n **kwargs,\n ) -> Union[GenerateOutput, torch.LongTensor]:\n generation_config = generation_config if generation_config is not None else self.generation_config\n\n # Process stop_words_ids.\n stop_words_ids = kwargs.pop(\"stop_words_ids\", None)\n if stop_words_ids is None and generation_config is not None:\n stop_words_ids = getattr(generation_config, \"stop_words_ids\", None)\n if stop_words_ids is None:\n stop_words_ids = getattr(generation_config, \"stop_words_ids\", None)\n\n if stop_words_ids is not None:\n stop_words_logits_processor = StopWordsLogitsProcessor(\n stop_words_ids=stop_words_ids,\n eos_token_id=generation_config.eos_token_id,\n )\n if logits_processor is None:\n logits_processor = LogitsProcessorList([stop_words_logits_processor])\n else:\n logits_processor.append(stop_words_logits_processor)\n\n return super().generate(\n inputs,\n generation_config=generation_config,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n synced_gpus=synced_gpus,\n assistant_model=assistant_model,\n streamer=streamer,\n **kwargs,\n )" } ]
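For orientation, here is a hedged usage sketch of the make_context helper captured in the context above: it builds a ChatML prompt string plus the matching token ids. The checkpoint name, the AutoTokenizer loading call, and the assumption that the repository root is on PYTHONPATH are illustrative only and not part of this record.

from transformers import AutoTokenizer
from llm.models.hf_models.qwen_vl.qwen_generation_utils import make_context

# Assumption: a Qwen-VL tokenizer (defines im_start_id / im_end_id / IMAGE_ST),
# loaded with trust_remote_code so the custom tokenizer class is used.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-VL-Chat", trust_remote_code=True)

raw_text, context_tokens = make_context(
    tokenizer,
    query="Describe the image in one sentence.",
    history=[("Hello", "Hi! How can I help you?")],
    system="You are a helpful assistant.",
    max_window_size=6144,
    chat_format="chatml",
)
print(raw_text)             # ChatML prompt: <|im_start|>system ... <|im_end|> ...
print(len(context_tokens))  # token ids that would be fed to the model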
import importlib
import math
import torch  # noqa
import torch.nn.functional as F  # noqa
import torch.utils.checkpoint  # noqa
from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union, Callable, List, Any, Generator  # noqa
from torch.cuda.amp import autocast  # noqa
from torch.nn import CrossEntropyLoss
from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList  # noqa
from transformers.generation.logits_process import LogitsProcessorList  # noqa
from transformers.generation.streamers import BaseStreamer  # noqa
from transformers.generation.utils import GenerateOutput  # noqa
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel  # noqa
from transformers.utils import logging
from einops import rearrange
from torch import nn
from .configuration_qwen import QWenConfig  # noqa
from .qwen_generation_utils import (
    make_context,
)  # noqa
from llm.models.hf_models.qwen.qwen_generation_utils import (
    HistoryType,
    decode_tokens,
    get_stop_words_ids,
)
from .visual import VisionTransformer
from llm.models.hf_models.qwen.modeling_qwen import RMSNorm, apply_rotary_pos_emb, QWenMLP
from llm.models.hf_models.qwen.modeling_qwen import QWenAttention as QWenAttention_chat
from llm.models.hf_models.qwen.modeling_qwen import QWenModel as QWenModel_chat
from llm.models.hf_models.qwen.modeling_qwen import QWenLMHeadModel as QWenLMHeadModel_chat
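The modeling code in this record reuses RMSNorm and apply_rotary_pos_emb from the Qwen chat model. As a quick reference, a self-contained sketch of what RMSNorm computes on its non-fused path (mirroring the _norm method in the snippet above; the function name rms_norm_reference is ours):

import torch

def rms_norm_reference(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # Normalize by the root-mean-square over the last dimension (no mean subtraction),
    # then apply a learned per-channel scale.
    xf = x.float()
    rms = torch.rsqrt(xf.pow(2).mean(-1, keepdim=True) + eps)
    return (xf * rms).type_as(x) * weight

x = torch.randn(2, 5, 8)
w = torch.ones(8)
print(rms_norm_reference(x, w).shape)  # torch.Size([2, 5, 8])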
14914
SUPPORT_CUDA = torch.cuda.is_available() SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported() SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7 logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "qwen" _CONFIG_FOR_DOC = "QWenConfig" QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"] _ERROR_BAD_CHAT_FORMAT = """\ We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml". If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat(). 我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。 如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。 """ _SENTINEL = object() _ERROR_STREAM_IN_CHAT = """\ Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True). 向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。 """ apply_rotary_emb_func = None rms_norm = None # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class QWenAttention(QWenAttention_chat): def __init__(self, config): super().__init__(config) def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device, ) attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ): mixed_x_layer = self.c_attn(hidden_states) query, key, value = mixed_x_layer.split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if rotary_pos_emb is not None: cur_len = query.shape[1] rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] rotary_pos_emb = (rotary_pos_emb,) * 2 q_pos_emb, k_pos_emb = rotary_pos_emb # Slice the pos emb for current inference
# Copyright (c) Alibaba Cloud. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. if TYPE_CHECKING: try: except ImportError: rearrange = None SUPPORT_CUDA = torch.cuda.is_available() SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported() SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7 logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "qwen" _CONFIG_FOR_DOC = "QWenConfig" QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"] _ERROR_BAD_CHAT_FORMAT = """\ We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml". If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat(). 我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。 如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。 """ _SENTINEL = object() _ERROR_STREAM_IN_CHAT = """\ Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True). 向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。 """ apply_rotary_emb_func = None rms_norm = None # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class QWenAttention(QWenAttention_chat): def __init__(self, config): super().__init__(config) def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device, ) attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ): mixed_x_layer = self.c_attn(hidden_states) query, key, value = mixed_x_layer.split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if rotary_pos_emb is not None: cur_len = query.shape[1] rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] rotary_pos_emb = (rotary_pos_emb,) * 2 q_pos_emb, k_pos_emb = rotary_pos_emb # Slice the pos emb for current inference
query = apply_rotary_pos_emb(query, q_pos_emb)
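The next_line above applies rotary position embeddings to the query tensor. Below is a compact standalone reference of the rotation performed by apply_rotary_pos_emb on its slow path; it ignores the fused kernel and the partial-rotary (rot_dim) case, and the function names here are ours:

import torch

def rotate_half(x: torch.Tensor) -> torch.Tensor:
    # Split the last dimension into two halves and rotate them: (a, b) -> (-b, a).
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

def apply_rope_reference(t: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
    # t: (batch, seq, heads, head_dim); cos/sin broadcast over the same shape.
    return (t.float() * cos + rotate_half(t.float()) * sin).type_as(t)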
5
2023-11-26 10:12:52+00:00
24k
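As a worked illustration of the _make_causal_mask helper that appears in the code of this record (a standalone sketch for a length-4 sequence; the large negative fill is written as -inf for readability, the real value is torch.finfo(dtype).min):

import torch

tgt_len, dtype = 4, torch.float32
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min)
cond = torch.arange(tgt_len)
mask.masked_fill_(cond < (cond + 1).view(tgt_len, 1), 0)
# Row i keeps positions j <= i and blocks the future:
# [[   0, -inf, -inf, -inf],
#  [   0,    0, -inf, -inf],
#  [   0,    0,    0, -inf],
#  [   0,    0,    0,    0]]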
danilonumeroso/conar
models/tsp_reasoner.py
[ { "identifier": "vmapped_beam_search_rollout", "path": "baselines/beam_search.py", "snippet": "BEAM_WIDTH = 128\ndef expand_single(beam_vis, beam_last, beam_cost, beam_par, W):\ndef beam_search_rollout_step(W, beam_width, i, tpl):\ndef beam_search_rollout(start_route, W, num_nodes, beam_width):\ndef beam_search_baseline(data, return_ratio=True):" }, { "identifier": "AlgorithmReasoner", "path": "models/algorithm_reasoner.py", "snippet": "class AlgorithmReasoner(nn.Module):\n @staticmethod\n def prepare_batch(batch):\n batch = batch.clone()\n for name, tensor in batch.items():\n if not torch.is_tensor(tensor):\n continue\n if name.endswith('_temporal') and 'index' not in name:\n tensor = tensor.transpose(1, 0)\n batch[name] = tensor\n return batch\n\n @staticmethod\n def get_masks(train, batch, continue_logits, enforced_mask):\n mask = continue_logits[batch.batch] > 0\n mask_cp = (continue_logits > 0.0).bool()\n mask_edges = mask[batch.edge_index[0]]\n if not train and enforced_mask is not None:\n enforced_mask_ids = enforced_mask[batch.batch]\n mask &= enforced_mask_ids\n mask_cp &= enforced_mask\n return mask_cp, mask, mask_edges\n\n def add_encoder(self, stage, name, loc, data_type, data_sample, bias):\n if name == 'adj': # we use edge indices\n return\n if data_type == Type.SCALAR or data_type == Type.MASK or data_type == Type.MASK_ONE:\n self.encoders[stage][name] = nn.Linear(1, self.latent_features, bias=bias)\n\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n self.encoders[stage][name] = nn.Linear(in_shape, self.latent_features, bias=bias)\n\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]: # pointers are 1-hot encoded on the edges\n self.encoders[stage][name] = nn.Linear(1, self.latent_features, bias=bias)\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.encoders[stage][name] = nn.ModuleList([\n nn.Linear(1, self.latent_features, bias=bias),\n nn.Linear(1, self.latent_features, bias=bias)\n ])\n\n def add_decoder(self, stage, name, loc, data_type, data_sample, bias):\n assert name != 'adj', 'Adjacency matrix should not be decoded'\n dec = None\n if loc == Location.NODE:\n if data_type in (Type.SCALAR, Type.MASK, Type.MASK_ONE):\n dec = nn.Linear(2*self.latent_features, 1, bias=bias)\n\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n dec = nn.Linear(2*self.latent_features, in_shape, bias=bias)\n\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]: # pointers are decoded from both node and edge information\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n if loc == Location.GRAPH:\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n in_shape = data_sample.shape[-1] if data_type == Type.CATEGORICAL else 1\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(self.latent_features, in_shape, bias=bias),\n ])\n\n if loc == Location.EDGE:\n if data_type in (Type.SCALAR, Type.MASK, Type.MASK_ONE):\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, 1, bias=bias),\n nn.Linear(2*self.latent_features, 1, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n if 
data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(self.latent_features, in_shape, bias=bias),\n ])\n if data_type == Type.POINTER:\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n assert dec is not None, breakpoint()\n self.decoders[stage][name] = dec\n\n\n\n\n def __init__(self,\n spec,\n data,\n latent_features,\n algo_processor,\n bias=True,\n use_TF=False,\n use_sinkhorn=True,\n L1_loss=False,\n xavier_on_scalars=True,\n global_termination_pool='max', #'predinet',\n get_attention=False,\n use_batch_norm=False,\n transferring=False,\n timeit=True,\n **kwargs):\n\n super().__init__()\n self.step_idx = 0\n self.latent_features = latent_features\n self.assert_checks = False\n self.timeit = timeit\n self.debug = False\n self.debug_epoch_threshold = 1e9\n self.L1_loss = L1_loss\n self.global_termination_pool = global_termination_pool\n self.next_step_pool = True\n self.processor = algo_processor\n self.triplet_reasoning = False\n if isinstance(self.processor.processors[0].processor, TripletMPNN):\n self.triplet_reasoning = True\n self.triplet_reductor = nn.Linear(2*latent_features, latent_features, bias=bias)\n self.use_TF = use_TF\n self.use_sinkhorn = use_sinkhorn\n self.get_attention = get_attention\n self.lambda_mul = 1 # 0.0001\n self.transferring = transferring\n self.node_encoder = nn.Sequential(\n nn.Linear(2*latent_features, latent_features, bias=bias),\n )\n self.encoders = nn.ModuleDict({\n 'input': nn.ModuleDict({\n }),\n 'hint': nn.ModuleDict({\n }),\n })\n self.decoders = nn.ModuleDict({\n 'hint': nn.ModuleDict({\n }),\n 'output': nn.ModuleDict({\n })\n })\n for name, (stage, loc, datatype) in spec.items():\n if name == 'adj': # we use edge indices\n continue\n if stage == 'input':\n self.add_encoder(stage, name, loc, datatype, getattr(data, name), bias)\n if stage == 'output':\n self.add_decoder(stage, name, loc, datatype, getattr(data, name), bias)\n if stage == 'hint':\n self.add_encoder(stage, name, loc, datatype, getattr(data, name), bias)\n self.add_decoder(stage, name, loc, datatype, getattr(data, name), bias)\n\n self.node_pointer_vec = nn.Parameter(torch.randn(latent_features))\n if xavier_on_scalars:\n assert False, \"NEEDS REFACTORING\"\n torch.nn.init.trunc_normal_(self.encoders['input']['edge_attr'].weight, std=1/torch.sqrt(torch.tensor(latent_features)))\n\n if global_termination_pool == 'attention':\n inp_dim = latent_features\n self.global_attn = GlobalAttentionPlusCoef(\n nn.Sequential(\n nn.Linear(inp_dim, latent_features, bias=bias),\n nn.LeakyReLU(),\n nn.Linear(latent_features, 1, bias=bias)\n ),\n nn=None)\n\n if global_termination_pool == 'predinet':\n lf = latent_features\n self.predinet = PrediNet(lf, 1, lf, lf, flatten_pooling=torch_geometric.nn.glob.global_max_pool)\n\n self.termination_network = nn.Sequential(\n nn.BatchNorm1d(latent_features) if use_batch_norm else nn.Identity(),\n nn.Linear(latent_features, 1, bias=bias),\n )\n\n def get_continue_logits(self, batch_ids, latent_nodes, sth_else=None):\n if self.global_termination_pool == 'mean':\n graph_latent = 
torch_geometric.nn.global_mean_pool(latent_nodes, batch_ids)\n if self.global_termination_pool == 'max':\n graph_latent = torch_geometric.nn.global_max_pool(latent_nodes, batch_ids)\n if self.global_termination_pool == 'attention':\n graph_latent, coef = self.global_attn(latent_nodes, batch_ids)\n if self.get_attention:\n self.attentions[self.step_idx] = coef.clone().detach()\n self.per_step_latent[self.step_idx] = sth_else\n\n if self.global_termination_pool == 'predinet':\n assert not torch.isnan(latent_nodes).any()\n graph_latent = self.predinet(latent_nodes, batch_ids)\n\n if self.get_attention:\n self.attentions[self.step_idx] = latent_nodes\n continue_logits = self.termination_network(graph_latent).view(-1)\n return continue_logits\n\n def zero_termination(self):\n self.true_positive = 0\n self.false_positive = 0\n self.false_negative = 0\n self.true_negative = 0\n\n def zero_steps(self):\n self.sum_of_processed_nodes = 0\n self.sum_of_processed_edges = 0\n self.step_idx = 0\n self.sum_of_steps = 0\n self.cnt = 0\n\n @staticmethod\n def convert_logits_to_outputs(spec,\n logits,\n fr,\n to,\n num_nodes,\n batch_ids,\n include_probabilities=True,\n dbg=False):\n outs = defaultdict(dict)\n\n for stage in logits.keys():\n for name in logits[stage].keys():\n if name not in logits[stage] or name not in spec:\n continue\n stage, loc, data_type = spec[name]\n assert stage != Stage.INPUT\n if data_type == Type.SOFT_POINTER:\n assert False, f\"Not yet added, please add {name}\"\n if data_type in [Type.CATEGORICAL]:\n indices = logits[stage][name].argmax(-1)\n outshape = logits[stage][name].shape[-1]\n outs[stage][name] = F.one_hot(indices, num_classes=outshape).float()\n if data_type == Type.MASK_ONE:\n _, amax = torch_scatter.scatter_max(logits[stage][name], batch_ids, dim=0)\n amax = amax.squeeze(-1)\n outs[stage][name] = torch.zeros_like(logits[stage][name])\n outs[stage][name][amax] = 1\n if data_type == Type.MASK:\n outs[stage][name] = (logits[stage][name] > 0).float()\n if data_type == Type.SCALAR:\n outs[stage][name] = logits[stage][name]\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pointer_logits = logits[stage][name]\n _, pointers = torch_scatter.scatter_max(pointer_logits, fr, dim_size=num_nodes)\n pointers = to[pointers]\n pointer_probabilities = torch_geometric.utils.softmax(pointer_logits, fr, num_nodes=num_nodes)\n outs[stage][name] = pointers\n if include_probabilities:\n outs[stage][f'{name}_probabilities'] = pointer_probabilities\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pointer_logits = logits[stage][name]\n pointers = pointer_logits.argmax(-1)\n pointer_probabilities = F.softmax(pointer_logits, dim=-1)\n outs[stage][name] = pointers\n if include_probabilities:\n outs[stage][f'{name}_probabilities'] = pointer_probabilities\n return outs\n\n def set_initial_states(self, batch, init_last_latent=None):\n self.processor.zero_lstm(batch.num_nodes) # NO-OP if processor(s) don't use LSTM\n self.last_latent = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n if init_last_latent is not None:\n self.last_latent = init_last_latent\n self.last_latent_edges = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n self.last_continue_logits = torch.ones(batch.num_graphs, device=batch.edge_index.device)\n self.last_logits = defaultdict(dict)\n\n\n for name, (stage, loc, data_type) in 
self.dataset_spec.items():\n if stage == Stage.INPUT:\n continue\n if name not in self.decoders[stage]:\n continue\n if stage == Stage.OUTPUT:\n\n if loc in [Location.NODE, Location.GRAPH]:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)\n if data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name).unsqueeze(-1)\n if data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name).bool(), 1e9, -1e9).unsqueeze(-1)\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.last_logits[stage][name] = torch.where(batch.edge_index[0, :] == batch.edge_index[1, :], 1e9, -1e9).to(batch.edge_index.device) # self-loops\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name).bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n ptrs = getattr(batch, name).int()\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n ptrs = ptrs - starts_edge\n self.last_logits[stage][name] = torch.full((batch.edge_index.shape[1], int(ptrs.max().item())+1), -1e9).to(batch.edge_index.device)\n self.last_logits[stage][name][torch.arange(ptrs.shape[0]), ptrs] = 1e9\n else:\n assert False, breakpoint()\n\n if stage == Stage.HINT:\n\n if loc in [Location.NODE, Location.GRAPH]:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)[0]\n elif data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name)[0].unsqueeze(-1)\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name)[0, :].bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.last_logits[stage][name] = torch.where(batch.edge_index[0, :] == batch.edge_index[1, :], 1e9, -1e9).to(batch.edge_index.device) # self-loops\n else:\n assert False, breakpoint()\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)[0]\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name)[0, :].bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name)[0, :].unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n ptrs = getattr(batch, name)[0, :].int()\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n ptrs = ptrs - starts_edge\n self.max_nodes_in_graph = int(ptrs.max().item())+1 # FIXME try another way to infer\n self.last_logits[stage][name] = torch.where(edge_one_hot_encode_pointers_edge(ptrs, batch, self.max_nodes_in_graph).bool(), 1e9, -1e9).to(batch.edge_index.device)\n else:\n assert False, breakpoint()\n\n self.all_hint_logits = []\n self.all_masks_graph = []\n\n def update_per_mask(self, before, after, mask=None):\n # NOTE: this does expansion of the mask, if you do\n # NOT use expansion, use torch.where\n if mask is None:\n mask = self.mask\n mask = mask.unsqueeze(-1).expand_as(before)\n return torch.where(mask, after, before)\n\n def update_state_dict(self, before, after):\n new_before = defaultdict(dict)\n for stage in after.keys():\n for name in 
after[stage].keys():\n _, loc, data_type = self.dataset_spec[name]\n if loc == Location.GRAPH:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name], mask=self.mask_cp)\n if loc == Location.EDGE:\n if data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL, Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name], mask=self.mask_edges)\n else:\n assert False, \"Please implement\"\n if loc == Location.NODE:\n if data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name])\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n new_before[stage][name] = torch.where(self.mask_edges, after[stage][name], before[stage][name])\n else:\n assert False, breakpoint()\n return new_before\n\n def update_states(self, batch, current_latent, edges_current_latent,\n logits, continue_logits):\n self.last_continue_logits = torch.where(self.mask_cp, continue_logits,\n self.last_continue_logits)\n self.last_latent = self.update_per_mask(self.last_latent, current_latent)\n self.last_latent_edges = self.update_per_mask(self.last_latent_edges, edges_current_latent, mask=self.mask_edges)\n self.last_logits = self.update_state_dict(self.last_logits, logits)\n self.all_hint_logits.append(self.last_logits['hint'])\n self.all_masks_graph.append(self.mask_cp)\n preds = type(self).convert_logits_to_outputs(\n self.dataset_spec, self.last_logits, batch.edge_index[0],\n batch.edge_index[1], batch.num_nodes, batch.batch,\n self.epoch > self.debug_epoch_threshold)\n self.last_hint = preds['hint']\n self.last_output = preds['output']\n\n def prepare_initial_masks(self, batch):\n self.mask = torch.ones_like(batch.batch, dtype=torch.bool, device=batch.edge_index.device)\n self.mask_cp = torch.ones(batch.num_graphs, dtype=torch.bool, device=batch.edge_index.device)\n self.mask_edges = torch.ones_like(batch.edge_index[0], dtype=torch.bool, device=batch.edge_index.device)\n\n def loop_condition(self, termination, STEPS_SIZE):\n return (((not self.training and termination.any()) or\n (self.training and termination.any())) and\n self.step_idx+1 < STEPS_SIZE)\n\n def loop_body(self,\n batch,\n node_fts,\n edge_fts,\n graph_fts,\n hint_inp_curr,\n hint_out_curr,\n true_termination,\n first_n_processors=1000):\n\n current_latent, edges_current_latent, preds, continue_logits =\\\n self.forward(\n batch,\n node_fts,\n edge_fts,\n graph_fts,\n first_n_processors=first_n_processors,\n )\n termination = continue_logits\n\n self.debug_batch = batch\n self.debug_hint_out_curr = hint_out_curr\n if self.timeit:\n st = time.time()\n self.update_states(batch, current_latent, edges_current_latent, preds, termination)\n if self.timeit:\n print(f'updating states: {time.time()-st}')\n\n def get_step_input(self, x_curr, batch):\n if self.training and self.use_TF or self.hardcode_outputs:\n return x_curr\n return type(self).convert_logits_to_outputs(\n self.dataset_spec, self.last_logits, batch.edge_index[0],\n batch.edge_index[1], batch.num_nodes, batch.batch,\n self.epoch > self.debug_epoch_threshold)['hint']\n\n def encode_inputs(self, batch):\n node_fts = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n edge_fts = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n for name, (stage, loc, data_type) 
in self.dataset_spec.items():\n if stage != Stage.INPUT:\n continue\n if name not in self.encoders[stage]:\n continue\n data = getattr(batch, name)\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n assert False, breakpoint() # we don't have it for now (B-F/MST), will figure out later\n if data_type != Type.CATEGORICAL:\n data = data.unsqueeze(-1)\n if loc == Location.EDGE:\n edge_fts += self.encoders[stage][name](data)\n if loc == Location.NODE:\n node_fts += self.encoders[stage][name](data)\n return node_fts, edge_fts\n\n def encode_hints(self, hints, batch):\n node_fts = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n edge_fts = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n graph_fts = torch.zeros(batch.num_graphs, self.latent_features, device=batch.edge_index.device)\n\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.HINT:\n continue\n if name not in self.encoders[stage]:\n continue\n hint = hints[name]\n if loc == Location.NODE and data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n node_fts = node_fts + self.encoders['hint'][name](hint)\n if loc == Location.EDGE and data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n edge_fts = edge_fts + self.encoders['hint'][name](hint)\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers(hint, batch.edge_index)\n edge_fts = edge_fts + self.encoders['hint'][name](pred_gt_one_hot.unsqueeze(-1))\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers_edge(hint, batch, self.max_nodes_in_graph)\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n encoding = self.encoders['hint'][name][0](pred_gt_one_hot.unsqueeze(-1))\n encoding_2 = self.encoders['hint'][name][1](pred_gt_one_hot.unsqueeze(-1))\n encoding_sparse = SparseTensor(row=batch.edge_index[0], col=batch.edge_index[1], value=encoding)\n res_1 = encoding_sparse.mean(1)[batch.edge_index[0], batch.edge_index[1]-starts_edge]\n res_2 = encoding_2.mean(1)\n edge_fts += res_1 + res_2 # INPLACE\n if loc == Location.GRAPH and data_type in [Type.CATEGORICAL, Type.SCALAR, Type.MASK]:\n graph_fts = graph_fts + self.encoders['hint'][name](hint)\n return node_fts, edge_fts, graph_fts\n\n def get_input_output_hints(self, batch):\n hint_inp_curr = {}\n hint_out_curr = {}\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.HINT:\n continue\n hint_inp_curr[name] = getattr(batch, name)[self.step_idx]\n hint_out_curr[name] = getattr(batch, name)[self.step_idx+1]\n if 'mask' in data_type or data_type == Type.SCALAR:\n hint_inp_curr[name] = hint_inp_curr[name].unsqueeze(-1)\n hint_out_curr[name] = hint_out_curr[name].unsqueeze(-1)\n return hint_inp_curr, hint_out_curr\n\n def process(\n self,\n batch,\n EPSILON=0,\n enforced_mask=None,\n hardcode_outputs=False,\n debug=False,\n first_n_processors=1000,\n init_last_latent=None,\n **kwargs):\n\n SIZE, STEPS_SIZE = prepare_constants(batch)\n self.hardcode_outputs = hardcode_outputs\n\n # Pytorch Geometric batches along the node dimension, but we execute\n # along the temporal (step) dimension, hence we need to transpose\n # a few tensors. 
Done by `prepare_batch`.\n if self.assert_checks:\n check_edge_index_sorted(batch.edge_index)\n if self.epoch > self.debug_epoch_threshold:\n breakpoint()\n self.zero_steps()\n batch = type(self).prepare_batch(batch)\n # When we want to calculate last step metrics/accuracies\n # we need to take into account again different termination per graph\n # hence we save last step tensors (e.g. outputs) into their\n # corresponding tensor. The function below prepares these tensors\n # (all set to zeros, except masking for computation, which are ones)\n self.set_initial_states(batch, init_last_latent=init_last_latent)\n # Prepare masking tensors (each graph does at least 1 iteration of the algo)\n self.prepare_initial_masks(batch)\n # A flag if we had a wrong graph in the batch. Used for visualisation\n # of what went wrong\n self.wrong_flag = False\n assert self.mask_cp.all(), self.mask_cp\n if self.timeit:\n st = time.time()\n node_fts_inp, edge_fts_inp = self.encode_inputs(batch)\n if self.timeit:\n print(f'encoding inputs: {time.time()-st}')\n\n while True:\n hint_inp_curr, hint_out_curr = self.get_input_output_hints(batch)\n if not self.training:\n assert (self.last_continue_logits > 0).any() or True\n\n # Some algorithms output fewer values than they take\n # so if we reuse our last step outputs, they need to be fed back in.\n if self.timeit:\n st = time.time()\n hint_inp_curr = self.get_step_input(hint_inp_curr, batch)\n if self.timeit:\n print(f'getting step input : {time.time()-st}')\n st = time.time()\n node_fts_hint, edge_fts_hint, graph_fts = self.encode_hints(hint_inp_curr, batch)\n node_fts = node_fts_inp + node_fts_hint\n edge_fts = edge_fts_inp + edge_fts_hint\n if self.timeit:\n print(f'encoding hints: {time.time()-st}')\n\n true_termination = torch.where(self.step_idx+1 >= batch.lengths-1, -1e9, 1e9)\n\n # Does one iteration of the algo and accumulates statistics\n self.loop_body(batch,\n node_fts,\n edge_fts,\n graph_fts,\n hint_inp_curr,\n hint_out_curr,\n true_termination,\n first_n_processors=first_n_processors)\n # And calculate what graphs would execute on the next step.\n self.mask_cp, self.mask, self.mask_edges = type(self).get_masks(self.training, batch, true_termination if self.training else self.last_continue_logits, enforced_mask)\n if not self.loop_condition(\n self.mask_cp,\n STEPS_SIZE):\n break\n assert self.mask_cp.any()\n self.step_idx += 1\n\n return self.all_hint_logits, self.last_logits, self.all_masks_graph\n\n def decode(self, batch, encoded_nodes, hidden, edge_fts, graph_fts):\n catted = torch.cat((encoded_nodes, hidden), dim=1)\n outs = defaultdict(dict)\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage == Stage.INPUT:\n continue\n\n if loc == Location.NODE:\n\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n outs[stage][name] = self.decoders[stage][name](catted)\n\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n fr = self.decoders[stage][name][0](catted[batch.edge_index[0]])\n to = self.decoders[stage][name][1](catted[batch.edge_index[1]])\n edge = self.decoders[stage][name][2](edge_fts)\n prod = self.decoders[stage][name][3](to.max(fr+edge)).squeeze(-1)\n if data_type in [Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION] and self.use_sinkhorn:\n prod = torch.maximum(prod, self.decoders[stage][name][3](fr.max(to+edge)).squeeze(-1))\n prod = sinkhorn_normalize(batch, prod, temperature=0.1, steps=10 if self.training else 60, add_noise=self.training)\n 
outs[stage][name] = prod\n\n if loc == Location.GRAPH:\n aggr_node_fts = torch_scatter.scatter_max(catted, batch.batch, dim=0)[0]\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n outs[stage][name] = self.decoders[stage][name][0](aggr_node_fts) + self.decoders[stage][name][1](graph_fts)\n else:\n assert False\n\n if loc == Location.EDGE:\n fr = self.decoders[stage][name][0](catted[batch.edge_index[0]])\n to = self.decoders[stage][name][1](catted[batch.edge_index[1]])\n edge = self.decoders[stage][name][2](edge_fts)\n if data_type in (Type.CATEGORICAL, Type.MASK, Type.SCALAR):\n outs[stage][name] = fr + to + edge\n elif data_type == Type.POINTER:\n pred = fr + to + edge\n pred_2 = self.decoders[stage][name][3](catted)\n ebatch = batch.edge_index_batch\n st = batch.ptr[ebatch]\n en = batch.ptr[ebatch+1]\n dense_pred_2, mask_pred_2 = tg_utils.to_dense_batch(pred_2, batch=batch.batch)\n edge_pred_2 = dense_pred_2[ebatch]\n mask_edge_pred_2 = mask_pred_2[ebatch]\n probs_logits = self.decoders[stage][name][4](torch.maximum(pred[:, None, :], edge_pred_2)).squeeze(-1)\n probs_logits[~mask_edge_pred_2] = -1e9\n outs[stage][name] = probs_logits\n else:\n assert False\n\n return outs\n\n def encode_nodes(self, current_input, last_latent):\n return torch.cat((current_input, last_latent), dim=1)\n\n def forward(self, batch, node_fts, edge_fts, graph_fts, first_n_processors=1000):\n if torch.isnan(node_fts).any():\n breakpoint()\n assert not torch.isnan(self.last_latent).any()\n assert not torch.isnan(node_fts).any()\n if self.timeit:\n st = time.time()\n if self.timeit:\n print(f'projecting nodes: {time.time()-st}')\n\n if self.timeit:\n st = time.time()\n edge_index = batch.edge_index\n hidden, edges_hidden = self.processor(node_fts, edge_fts, graph_fts, edge_index, self.last_latent, self.last_latent_edges, first_n_processors=first_n_processors, batch=batch)\n if self.timeit:\n print(f'message passing: {time.time()-st}')\n assert not torch.isnan(hidden).any()\n if self.timeit:\n st = time.time()\n if self.triplet_reasoning:\n edge_fts = self.triplet_reductor(torch.cat([edge_fts, edges_hidden], dim=-1))\n outs = self.decode(batch, node_fts, hidden, edge_fts, graph_fts)\n if self.timeit:\n print(f'decoding hints: {time.time()-st}')\n continue_logits = torch.where(self.step_idx+1 >= batch.lengths-1, -1e9, 1e9)\n return hidden, edges_hidden, outs, continue_logits" }, { "identifier": "LitAlgorithmReasoner", "path": "models/algorithm_reasoner.py", "snippet": "class LitAlgorithmReasoner(pl.LightningModule):\n def __init__(self,\n hidden_dim,\n algo_processor,\n dataset_class,\n dataset_root,\n dataset_kwargs,\n algorithm='mst_prim',\n update_edges_hidden=False,\n use_TF=False,\n use_sinkhorn=True,\n xavier_on_scalars=True,\n learning_rate=get_hyperparameters()['lr'],\n weight_decay=get_hyperparameters()['weight_decay'],\n test_with_val=False,\n test_with_val_every_n_epoch=20,\n test_train_every_n_epoch=20,\n **algorithm_base_kwargs):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.dataset_class = dataset_class\n self.dataset_root = dataset_root\n self.dataset_kwargs = dataset_kwargs\n self.learning_rate = learning_rate\n self.weight_decay = weight_decay\n self.timeit = False\n self.update_edges_hidden = update_edges_hidden\n self.use_TF = use_TF\n self.use_sinkhorn = use_sinkhorn\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.algorithm = algorithm\n self.xavier_on_scalars = xavier_on_scalars\n 
self.test_with_val = test_with_val\n self.test_with_val_every_n_epoch = test_with_val_every_n_epoch\n self.test_train_every_n_epoch = test_train_every_n_epoch\n self._datasets = {}\n if self.test_with_val:\n self.val_dataloader = self.val_dataloader_alt\n self.validation_step = self.validation_step_alt\n self._current_epoch = 0\n self.load_dataset('train')\n\n self.algorithm_module = AlgorithmReasoner(self.dataset.spec,\n self.dataset[0],\n hidden_dim,\n algo_processor,\n update_edges_hidden=update_edges_hidden,\n use_TF=use_TF,\n use_sinkhorn=use_sinkhorn,\n timeit=self.timeit,\n xavier_on_scalars=xavier_on_scalars,\n **algorithm_base_kwargs)\n self.save_hyperparameters(ignore=['algo_processor'])\n\n @property\n def current_epoch(self) -> int:\n \"\"\"The current epoch in the ``Trainer``, or 0 if not attached.\"\"\"\n return self.trainer.current_epoch if self._trainer else self._current_epoch\n\n @current_epoch.setter\n def current_epoch(self, epoch) -> int:\n self._current_epoch = epoch\n\n def prepare_for_transfer(self):\n algo_processor = copy.deepcopy(self.algorithm_module.processor)\n self.algorithm_module = AlgorithmReasoner(self.hidden_dim,\n self.node_features,\n self.edge_features,\n self.output_features,\n algo_processor,\n use_TF=False,\n timeit=self.timeit,\n **self.algorithm_base_kwargs)\n for p in self.algorithm_module.processor.parameters():\n p.requires_grad = False\n\n @staticmethod\n def pointer_loss(predecessor_pred, predecessor_gt_edge_1h,\n softmax_idx, num_nodes):\n loss_unreduced = cross_entropy(predecessor_pred, softmax_idx, predecessor_gt_edge_1h, num_nodes)\n sum_loss = loss_unreduced.flatten().sum()\n cnt_loss = predecessor_gt_edge_1h.count_nonzero()\n return sum_loss / cnt_loss\n\n def single_prediction_loss(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n loss = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[graph_mask], pred_gt[graph_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n\n if loc == Location.NODE:\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers(pred_gt, batch.edge_index)\n loss = type(self).pointer_loss(\n pred[edge_mask],\n pred_gt_one_hot[edge_mask],\n batch.edge_index[0][edge_mask], batch.num_nodes)\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.MASK_ONE:\n lsms = torch_scatter.scatter_log_softmax(pred[node_mask], batch.batch[node_mask].unsqueeze(-1), dim=0)\n loss = (-lsms[(pred_gt[node_mask] == 1.)]).mean()\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[node_mask], pred_gt[node_mask].argmax(-1))\n if loc == Location.EDGE:\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[edge_mask], pred_gt[edge_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type in [Type.POINTER, 
Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n loss = F.cross_entropy(\n pred[edge_mask],\n pred_gt[edge_mask])\n assert loss is not None, f'{stage}/{name}/{loc}/{data_type}'\n return loss\n\n def get_step_loss(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n if self.timeit:\n st = time.time()\n batch = self.algorithm_module.prepare_batch(batch)\n losses_dict = defaultdict(list)\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n assert graph_mask.any()\n for name in pred:\n stage, loc, data_type = self.dataset.spec[name]\n pred_gt = getattr(batch, name)[i+1]\n losses_dict[name].append(\n self.single_prediction_loss(name, pred[name], pred_gt,\n batch, graph_mask, node_mask,\n edge_mask))\n\n for name in output_logits:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n losses_dict[name].append(\n self.single_prediction_loss(name, output_logits[name],\n getattr(batch, name), batch,\n graph_mask, node_mask, edge_mask))\n\n for k, v in losses_dict.items():\n losses_dict[k] = torch.stack(v).mean()\n if self.timeit:\n print(f'loss calculation: {time.time()-st}')\n input()\n\n return losses_dict\n\n def single_prediction_acc(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n acc = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.NODE:\n if data_type == Type.MASK_ONE:\n # try:\n acc = (pred[node_mask].squeeze(-1).nonzero() == pred_gt[node_mask].nonzero()).float().mean()\n # except Exception as e:\n # breakpoint()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION, Type.MASK]:\n acc = (pred[node_mask].squeeze(-1) == pred_gt[node_mask]).float().mean()\n if data_type == Type.SCALAR:\n acc = ((pred[node_mask].squeeze(-1) - pred_gt[node_mask])**2).mean()\n if data_type == Type.CATEGORICAL:\n acc = (pred[node_mask].argmax(-1) == pred_gt[node_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[node_mask].squeeze(-1), pred_gt[node_mask])\n\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n acc = (pred[graph_mask].argmax(-1) == pred_gt[graph_mask].argmax(-1)).float().mean()\n if data_type == Type.SCALAR:\n acc = ((pred[graph_mask].squeeze(-1) - pred_gt[graph_mask])**2).mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[graph_mask].squeeze(-1), pred_gt[graph_mask])\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n acc = (pred[edge_mask].argmax(-1) == pred_gt[edge_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[edge_mask].squeeze(-1), pred_gt[edge_mask])\n if data_type == Type.SCALAR:\n acc = ((pred[edge_mask].squeeze(-1) - pred_gt[edge_mask])**2).mean()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n acc = (pred[edge_mask] == pred_gt[edge_mask]).float().mean()\n assert acc is not None, f\"Please implement {name}\"\n return acc\n\n def get_metrics(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n batch = self.algorithm_module.prepare_batch(batch)\n 
accs_dict = defaultdict(list)\n\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec, {'hint': pred},\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['hint']\n\n for name in outputs:\n acc = self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name)[i+1],\n batch,\n graph_mask,\n node_mask,\n edge_mask)\n accs_dict[name].append(acc)\n\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec,\n output_logits,\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['output']\n for name in outputs:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n accs_dict[name].append(\n self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name),\n batch,\n graph_mask,\n node_mask,\n edge_mask))\n\n for k, v in accs_dict.items():\n accs_dict[k] = torch.stack(v).mean()\n\n return accs_dict\n\n def fwd_step(self, batch, batch_idx):\n if self.timeit:\n st = time.time()\n self.algorithm_module.epoch = self.current_epoch\n all_hint_logits, output_logits, masks = self.algorithm_module.process(batch)\n if self.timeit:\n print(f'forward step: {time.time()-st}')\n input()\n return all_hint_logits, output_logits, masks\n\n def training_step(self, batch, batch_idx):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'train/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs)\n total_loss = sum(losses_dict.values()) / len(losses_dict)\n self.log('train/loss/average_loss', total_loss, prog_bar=False, on_step=True, on_epoch=True, batch_size=batch.num_graphs)\n accs_dict = {}\n if self.current_epoch % self.test_train_every_n_epoch == 0:\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'train/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n # if sum(losses_dict.values()) > 1e5:\n # breakpoint()\n return {'loss': total_loss, 'losses_dict': losses_dict, 'accuracies': accs_dict}\n\n def valtest_step(self, batch, batch_idx, mode):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'{mode}/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n if torch.isnan(sum(losses_dict.values())).any():\n breakpoint()\n self.log(f'{mode}/loss/average_loss', sum(losses_dict.values()) / len(losses_dict), batch_size=batch.num_graphs, add_dataloader_idx=False)\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'{mode}/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n return {'losses': losses_dict, 'accuracies': accs_dict}\n\n def validation_step_alt(self, batch, batch_idx, dataloader_idx):\n if dataloader_idx == 1 and not self.trainer.state.stage == 'sanity_check' and self.current_epoch % 
self.test_with_val_every_n_epoch == 0:\n return self.valtest_step(batch, batch_idx, 'periodic_test')\n if dataloader_idx == 0:\n return self.valtest_step(batch, batch_idx, 'val')\n\n def validation_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'val')\n\n def test_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'test')\n\n def predict_step(self, batch, batch_idx):\n return self.fwd_step(batch, batch_idx)\n\n def load_dataset(self, split, suffix=''):\n split = split+suffix\n nn = CONFIGS[self.algorithm][split]['num_nodes']\n self.dataset_kwargs['split'] = split\n if (split, nn) not in self._datasets:\n self._datasets[(split, nn)] = self.dataset_class(\n self.dataset_root,\n nn,\n CONFIGS[self.algorithm][split]['num_samples'],\n algorithm=self.algorithm,\n **self.dataset_kwargs)\n self.dataset = self._datasets[(split, nn)]\n print(f'Loading {self.dataset=} (num nodes: {nn}) with kwargs')\n pprint(self.dataset_kwargs)\n print()\n\n def get_a_loader(self, split, suffix=''):\n self.load_dataset(split, suffix='')\n self.algorithm_module.dataset_spec = self.dataset.spec\n dl = DataLoader(self.dataset,\n batch_size=get_hyperparameters()['batch_size'],\n shuffle=True if split == 'train' else False,\n drop_last=False,\n follow_batch=['edge_index'],\n num_workers=1,\n persistent_workers=True)\n return dl\n\n def train_dataloader(self):\n return self.get_a_loader('train')\n\n def val_dataloader_alt(self):\n return [self.get_a_loader('val'), self.get_a_loader('test')]\n\n def val_dataloader(self):\n return self.get_a_loader('val')\n\n def test_dataloader(self, suffix=''):\n return self.get_a_loader('test'+suffix)\n\n def configure_optimizers(self):\n lr = self.learning_rate\n wd = self.weight_decay\n optimizer = optim.Adam(self.parameters(),\n weight_decay=wd,\n lr=lr)\n return optimizer" }, { "identifier": "get_hyperparameters", "path": "hyperparameters.py", "snippet": "def get_hyperparameters():\n return {\n 'dim_latent': 128,\n 'num_bits': 8,\n 'weight_decay': 0,\n 'lr': 0.0003,\n 'nee_warmup_steps': 4000,\n 'dim_nodes_mst_prim': 1,\n 'dim_target_mst_prim': 1,\n 'device': 'cuda',\n 'batch_size': 64,\n 'bias': True,\n 'seed': 47, # for dataset generation\n 'calculate_termination_statistics': False,\n }" }, { "identifier": "CONFIGS", "path": "datasets/_configs.py", "snippet": "CONFIGS = defaultdict(lambda: _DEFAULT_CONFIG)" }, { "identifier": "cross_entropy", "path": "utils_execution.py", "snippet": "def cross_entropy(pred, softmax_idx, truth_1h, num_nodes):\n lsm_pred = torch.log(torch_geometric.utils.softmax(pred, softmax_idx, num_nodes=num_nodes)+1e-9)\n # truth_1h = F.one_hot(truth, num_nodes)\n return (-truth_1h*lsm_pred)" }, { "identifier": "check_edge_index_sorted", "path": "utils_execution.py", "snippet": "def check_edge_index_sorted(ei):\n for i in range(ei.shape[1]-1):\n assert ei[0][i] <= ei[0][i+1]\n if ei[0][i] == ei[0][i+1]:\n assert ei[1][i] < ei[1][i+1]" }, { "identifier": "prepare_constants", "path": "utils_execution.py", "snippet": "def prepare_constants(batch):\n SIZE = batch.num_nodes\n STEPS_SIZE = batch.lengths.max()-1\n return SIZE, STEPS_SIZE" }, { "identifier": "edge_one_hot_encode_pointers", "path": "utils_execution.py", "snippet": "def edge_one_hot_encode_pointers(pred, edge_index):\n pred_ei = torch.stack((torch.arange(pred.shape[0]).to(pred), pred))\n amat = torch_geometric.utils.to_dense_adj(pred_ei)\n return amat[0, edge_index[0], edge_index[1]]" }, { "identifier": "get_number_of_nodes", "path": "utils_execution.py", 
"snippet": "def get_number_of_nodes(algorithm, split):\n nns = CONFIGS[algorithm][split]['num_nodes']\n if isinstance(nns, int):\n nns = [nns]\n return nns" } ]
from collections import defaultdict
from pprint import pprint
from torch_geometric.loader import DataLoader
from pytorch_lightning.trainer.supporters import CombinedLoader
from baselines.beam_search import vmapped_beam_search_rollout, BEAM_WIDTH
from models.algorithm_reasoner import AlgorithmReasoner, LitAlgorithmReasoner
from hyperparameters import get_hyperparameters
from torch_geometric.utils import k_hop_subgraph
from datasets._configs import CONFIGS
from utils_execution import cross_entropy, check_edge_index_sorted, prepare_constants, edge_one_hot_encode_pointers, get_number_of_nodes
from clrs import Type, Location, Stage
import copy
import itertools
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_scatter
import torch_geometric
import pytorch_lightning as pl
14,905
transferring=transferring, learning_rate=learning_rate, **algo_reasoner_kwargs) self.algorithm_module = TSPReasoner(self.dataset.spec, self.dataset[0], hidden_dim, algo_processor, bias=bias, use_TF=use_TF, transferring=transferring, timeit=self.timeit, double_process=double_process, **algo_reasoner_kwargs) self.ensure_permutation = ensure_permutation self.double_process = double_process self.save_hyperparameters(ignore=['algo_processor']) def training_step(self, batch, batch_idx): ret = {'loss': 0, 'losses_dict': defaultdict(list), 'accuracies': defaultdict(list)} for bb in batch: ans = super().training_step(bb, batch_idx) ret['loss'] += ans['loss'] for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k].append(v) ret['loss'] /= len(batch) for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k] = torch.tensor(v).mean() return ret def get_tour_metrics(self, output_logits, batch): def get_mask(edges): mask = torch.zeros_like(batch.edge_index[0]) j = 0 for i in range(batch.edge_index.shape[1]): u1, v1 = batch.edge_index[:, i] u2, v2 = edges[:, j] if u1 == u2 and v1 == v2: mask[i] = 1 j += 1 if j == edges.shape[1]: break assert j == edges.shape[1] return mask def get_mask_v2(edges): dense_edges = torch_geometric.utils.to_dense_adj(edges, batch=batch.batch).bool() dense_edges_batch = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch).bool() edge_index, mask = torch_geometric.utils.dense_to_sparse(((dense_edges & dense_edges_batch).float()+1)) mask = mask - 1 return mask acc = None # st = time.time() outputs = type(self.algorithm_module).convert_logits_to_outputs( self.dataset.spec, output_logits, batch.edge_index[0], batch.edge_index[1], batch.num_nodes, batch.batch, include_probabilities=False)['output'] for name in outputs: pred = outputs[name] pred_gt = getattr(batch, name) stage, loc, data_type = self.dataset.spec[name] if loc == Location.NODE: if name == 'predecessor_index': tours = torch.stack([torch.arange(pred.shape[0]).to(pred), pred]) mask = get_mask_v2(tours).bool() st = time.time() mattr = batch.edge_attr[mask] mbatch = batch.edge_index_batch[mask] msrc, mdst = batch.edge_index[:, mask] tour_len = torch_scatter.scatter_sum(mattr, mbatch) tour_correctness = torch_scatter.scatter_sum((msrc == mdst.sort().values), mbatch) assert sum(tour_correctness)/len(tour_correctness) == 1 return dict(tour_len=tour_len.mean(), tour_len_gt=batch.optimal_value.mean().item(), tour_correctness=sum(tour_correctness)/len(tour_correctness), tour_relative_error=((tour_len-batch.optimal_value)/batch.optimal_value).mean()) def process_TSP_tour_greedy(self, batch, output_logits): mask_active_nodes = torch.tensor(batch.start_route).bool() mask_edges_to_nodes_in_tour = torch.zeros_like(batch.edge_index[0]).bool() max_nodes_per_graph = batch.batch.unique(return_counts=True)[1].max() num_nodes_per_graph = batch.num_nodes // batch.num_graphs for _ in range(max_nodes_per_graph - 1): mask_active_edges = mask_active_nodes[batch.edge_index[0]] & ~mask_edges_to_nodes_in_tour # Any edge outwards of active nodes and not pointing to previously used node mask_edges_to_nodes_in_tour |= mask_active_nodes[batch.edge_index[1]] # any edge towards the active nodes should not be used in future iterations sloops = (batch.edge_index[0] == batch.edge_index[1]) preds = output_logits['output']['predecessor_index'].clone() preds = preds.masked_fill(~mask_active_edges | sloops, -1e6) # nudge the max value to ensure there is a unique maximum max_idxs = 
preds.reshape(-1, num_nodes_per_graph).argmax(-1) max_idxs = F.one_hot(max_idxs, num_nodes_per_graph) preds[max_idxs.bool().flatten()] = (preds.reshape(-1, num_nodes_per_graph)[max_idxs.bool()] + 1e-4).flatten() output_logits['output']['predecessor_index'][mask_active_nodes[batch.edge_index[0]]] = preds[mask_active_nodes[batch.edge_index[0]]] new_active_nodes = preds.reshape(-1, num_nodes_per_graph).argmax(-1)[mask_active_nodes.bool()].unsqueeze(-1) # NOTE the reshape/flatten mechanic may not work if graphs in the same batch are of different sizes (consider using torch_scatter.scatter_max) mask_active_nodes = F.one_hot(new_active_nodes, num_nodes_per_graph).flatten().bool() final_pred_mask = mask_active_nodes[batch.edge_index[0]] & batch.start_route.bool()[batch.edge_index[1]] output_logits['output']['predecessor_index'] = output_logits['output']['predecessor_index'].masked_fill(final_pred_mask, 1e8) return output_logits def process_TSP_tour_BS(self, batch, output_logits): start_route = torch_geometric.utils.to_dense_batch(batch.start_route, batch=batch.batch)[0] dens_logits = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch, edge_attr=output_logits['output']['predecessor_index']) num_nodes = start_route.shape[1] # st = time.time() tours = torch.tensor(np.array(vmapped_beam_search_rollout( start_route.cpu().detach().numpy(), -dens_logits.cpu().detach().numpy(),
class TSPReasoner(AlgorithmReasoner): def __init__(self, spec, data, latent_features, algo_processor, bias=True, use_TF=False, L1_loss=False, global_termination_pool='max', #'predinet', get_attention=False, use_batch_norm=False, transferring=False, timeit=True, double_process=False, **algo_reasoner_kwargs): super().__init__( spec, data, latent_features, algo_processor, use_TF=use_TF, timeit=timeit, L1_loss=L1_loss, global_termination_pool=global_termination_pool, get_attention=get_attention, use_batch_norm=use_batch_norm, transferring=transferring, **algo_reasoner_kwargs, ) self.step_idx = 0 self.assert_checks = False self.debug = False self.debug_epoch_threshold = 1e9 self.next_step_pool = True self.double_process = double_process self.lambda_mul = 1# 0.0001 self.transferring = transferring def get_input_output_hints(self, batch): hint_inp_curr = dict() hint_out_curr = dict() return hint_inp_curr, hint_out_curr def process( self, *args, **kwargs): self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process( *args, first_n_processors=1000 if not self.double_process else 1, **kwargs) if self.double_process: self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process( *args, init_last_latent=self.last_latent, **kwargs) return self.all_hint_logits, self.last_logits, self.all_masks_graph class LitTSPReasoner(LitAlgorithmReasoner): def __init__(self, hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=True, use_TF=False, ensure_permutation='greedy', transferring=False, learning_rate=get_hyperparameters()['lr'], double_process=False, **algo_reasoner_kwargs): super().__init__(hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=bias, use_TF=use_TF, transferring=transferring, learning_rate=learning_rate, **algo_reasoner_kwargs) self.algorithm_module = TSPReasoner(self.dataset.spec, self.dataset[0], hidden_dim, algo_processor, bias=bias, use_TF=use_TF, transferring=transferring, timeit=self.timeit, double_process=double_process, **algo_reasoner_kwargs) self.ensure_permutation = ensure_permutation self.double_process = double_process self.save_hyperparameters(ignore=['algo_processor']) def training_step(self, batch, batch_idx): ret = {'loss': 0, 'losses_dict': defaultdict(list), 'accuracies': defaultdict(list)} for bb in batch: ans = super().training_step(bb, batch_idx) ret['loss'] += ans['loss'] for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k].append(v) ret['loss'] /= len(batch) for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k] = torch.tensor(v).mean() return ret def get_tour_metrics(self, output_logits, batch): def get_mask(edges): mask = torch.zeros_like(batch.edge_index[0]) j = 0 for i in range(batch.edge_index.shape[1]): u1, v1 = batch.edge_index[:, i] u2, v2 = edges[:, j] if u1 == u2 and v1 == v2: mask[i] = 1 j += 1 if j == edges.shape[1]: break assert j == edges.shape[1] return mask def get_mask_v2(edges): dense_edges = torch_geometric.utils.to_dense_adj(edges, batch=batch.batch).bool() dense_edges_batch = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch).bool() edge_index, mask = torch_geometric.utils.dense_to_sparse(((dense_edges & dense_edges_batch).float()+1)) mask = mask - 1 return mask acc = None # st = time.time() outputs = type(self.algorithm_module).convert_logits_to_outputs( self.dataset.spec, output_logits, batch.edge_index[0], batch.edge_index[1], batch.num_nodes, batch.batch, 
include_probabilities=False)['output'] for name in outputs: pred = outputs[name] pred_gt = getattr(batch, name) stage, loc, data_type = self.dataset.spec[name] if loc == Location.NODE: if name == 'predecessor_index': tours = torch.stack([torch.arange(pred.shape[0]).to(pred), pred]) mask = get_mask_v2(tours).bool() st = time.time() mattr = batch.edge_attr[mask] mbatch = batch.edge_index_batch[mask] msrc, mdst = batch.edge_index[:, mask] tour_len = torch_scatter.scatter_sum(mattr, mbatch) tour_correctness = torch_scatter.scatter_sum((msrc == mdst.sort().values), mbatch) assert sum(tour_correctness)/len(tour_correctness) == 1 return dict(tour_len=tour_len.mean(), tour_len_gt=batch.optimal_value.mean().item(), tour_correctness=sum(tour_correctness)/len(tour_correctness), tour_relative_error=((tour_len-batch.optimal_value)/batch.optimal_value).mean()) def process_TSP_tour_greedy(self, batch, output_logits): mask_active_nodes = torch.tensor(batch.start_route).bool() mask_edges_to_nodes_in_tour = torch.zeros_like(batch.edge_index[0]).bool() max_nodes_per_graph = batch.batch.unique(return_counts=True)[1].max() num_nodes_per_graph = batch.num_nodes // batch.num_graphs for _ in range(max_nodes_per_graph - 1): mask_active_edges = mask_active_nodes[batch.edge_index[0]] & ~mask_edges_to_nodes_in_tour # Any edge outwards of active nodes and not pointing to previously used node mask_edges_to_nodes_in_tour |= mask_active_nodes[batch.edge_index[1]] # any edge towards the active nodes should not be used in future iterations sloops = (batch.edge_index[0] == batch.edge_index[1]) preds = output_logits['output']['predecessor_index'].clone() preds = preds.masked_fill(~mask_active_edges | sloops, -1e6) # nudge the max value to ensure there is a unique maximum max_idxs = preds.reshape(-1, num_nodes_per_graph).argmax(-1) max_idxs = F.one_hot(max_idxs, num_nodes_per_graph) preds[max_idxs.bool().flatten()] = (preds.reshape(-1, num_nodes_per_graph)[max_idxs.bool()] + 1e-4).flatten() output_logits['output']['predecessor_index'][mask_active_nodes[batch.edge_index[0]]] = preds[mask_active_nodes[batch.edge_index[0]]] new_active_nodes = preds.reshape(-1, num_nodes_per_graph).argmax(-1)[mask_active_nodes.bool()].unsqueeze(-1) # NOTE the reshape/flatten mechanic may not work if graphs in the same batch are of different sizes (consider using torch_scatter.scatter_max) mask_active_nodes = F.one_hot(new_active_nodes, num_nodes_per_graph).flatten().bool() final_pred_mask = mask_active_nodes[batch.edge_index[0]] & batch.start_route.bool()[batch.edge_index[1]] output_logits['output']['predecessor_index'] = output_logits['output']['predecessor_index'].masked_fill(final_pred_mask, 1e8) return output_logits def process_TSP_tour_BS(self, batch, output_logits): start_route = torch_geometric.utils.to_dense_batch(batch.start_route, batch=batch.batch)[0] dens_logits = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch, edge_attr=output_logits['output']['predecessor_index']) num_nodes = start_route.shape[1] # st = time.time() tours = torch.tensor(np.array(vmapped_beam_search_rollout( start_route.cpu().detach().numpy(), -dens_logits.cpu().detach().numpy(),
num_nodes, BEAM_WIDTH)), device=start_route.device)
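In `process_TSP_tour_BS` from the cropped code above, the per-edge `predecessor_index` logits are densified into one matrix per graph before being handed (negated) to the beam-search rollout that the target line completes. The following is a minimal sketch of that densification step; the toy batch, shapes, and values are assumptions for illustration only.

import torch
from torch_geometric.utils import to_dense_adj, to_dense_batch

# Assumed toy batch: two triangles, i.e. 6 nodes and 3 directed edges per graph.
edge_index = torch.tensor([[0, 1, 2, 3, 4, 5],
                           [1, 2, 0, 4, 5, 3]])
batch = torch.tensor([0, 0, 0, 1, 1, 1])             # node -> graph assignment
edge_logits = torch.randn(edge_index.shape[1])       # stand-in for predecessor_index logits
start_route = torch.zeros(6)
start_route[[0, 3]] = 1.0                            # one assumed start node per graph

# Same densification as the snippet: node field -> (num_graphs, max_nodes),
# edge field -> (num_graphs, max_nodes, max_nodes).
dense_start = to_dense_batch(start_route, batch=batch)[0]                    # (2, 3)
dense_logits = to_dense_adj(edge_index, batch=batch, edge_attr=edge_logits)  # (2, 3, 3)
# The rollout then receives dense_start and -dense_logits, so higher logits
# presumably act as lower edge costs during the beam search.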
0
2023-11-20 15:32:43+00:00
24k
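Taken together, the fields of the record above describe one next-line completion task: `cropped_code` ends exactly where the prediction should continue, `next_line` is the gold continuation of that call, and `gold_snippet_index` selects an entry of `context`. Below is a minimal sketch of how such a record could be assembled into a prompt/target pair; the concatenation order and the `build_prompt_and_target` helper are assumptions for illustration, not part of the dataset's tooling.

def build_prompt_and_target(record: dict) -> tuple[str, str]:
    """Turn one record (field names as in the example above) into (prompt, target).

    The ordering below (gold snippet, then imports, then cropped code) is an
    assumed recipe for illustration only.
    """
    gold = record["context"][record["gold_snippet_index"]]  # the referenced snippet
    prompt = "\n".join([
        gold["snippet"],              # retrieved snippet judged most useful
        record["import_statement"],   # file-level imports
        record["cropped_code"],       # file truncated right before the target
    ])
    target = record["next_line"]      # the single line to be generated
    return prompt, target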
bearyi26/DCPT
lib/train/base_functions.py
[ { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n CVPR, 2019\n https://arxiv.org/pdf/1809.07845.pdf\n\n Download the dataset from https://cis.temple.edu/lasot/download.html\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_dir if root is None else root\n super().__init__('LaSOT', root, image_loader)\n\n # Keep a list of all classes\n self.class_list = [f for f in os.listdir(self.root)]\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n # sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n sequence_list = pandas.read_csv(file_path, header=None).squeeze(\"columns\").values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = 
os.path.join(seq_path, \"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n with open(out_of_view_file, 'r') as f:\n out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n\n target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(self.root, class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "Got10k", "path": "lib/train/dataset/got10k.py", "snippet": "class Got10k(BaseVideoDataset):\n \"\"\" GOT-10k dataset.\n\n Publication:\n GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\n Lianghua Huang, Xin Zhao, and Kaiqi Huang\n arXiv:1810.11981, 2018\n https://arxiv.org/pdf/1810.11981.pdf\n\n Download dataset from http://got-10k.aitestunion.com/downloads\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n not NOT the official got-10k validation split. To use the official validation split, provide that as\n the root folder instead.\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n options can be used at the same time.\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\n \"\"\"\n root = env_settings().got10k_dir if root is None else root\n super().__init__('GOT10k', root, image_loader)\n\n # all folders inside the root\n self.sequence_list = self._get_sequence_list()\n\n # seq_id is the index of the folder inside the got10k root path\n if split is not None:\n if seq_ids is not None:\n raise ValueError('Cannot set both split_name and seq_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')\n elif split == 'val':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')\n elif split == 'train_full':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_full_split.txt')\n elif split == 'vottrain':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')\n elif split == 'votval':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')\n else:\n raise ValueError('Unknown split name.')\n # seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze(\"columns\").values.tolist()\n elif seq_ids is None:\n seq_ids = list(range(0, len(self.sequence_list)))\n\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.sequence_meta_info = self._load_meta_info()\n self.seq_per_class = self._build_seq_per_class()\n\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def get_name(self):\n return 'got10k'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def _load_meta_info(self):\n sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}\n return sequence_meta_info\n\n def _read_meta(self, seq_path):\n try:\n with open(os.path.join(seq_path, 'meta_info.ini')) as f:\n meta_info = f.readlines()\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],\n 'motion_class': meta_info[6].split(': ')[-1][:-1],\n 'major_class': meta_info[7].split(': ')[-1][:-1],\n 'root_class': meta_info[8].split(': ')[-1][:-1],\n 'motion_adverb': meta_info[9].split(': ')[-1][:-1]})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n\n for i, s in enumerate(self.sequence_list):\n object_class = self.sequence_meta_info[s]['object_class_name']\n if object_class in seq_per_class:\n seq_per_class[object_class].append(i)\n else:\n seq_per_class[object_class] = [i]\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _get_sequence_list(self):\n with open(os.path.join(self.root, 'list.txt')) as f:\n dir_list = list(csv.reader(f))\n dir_list = [dir_name[0] for dir_name in dir_list]\n return dir_list\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n def _read_target_visible(self, seq_path):\n # Read full 
occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, \"absence.label\")\n cover_file = os.path.join(seq_path, \"cover.label\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n with open(cover_file, 'r', newline='') as f:\n cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n\n target_visible = ~occlusion & (cover>0).byte()\n\n visible_ratio = cover.float() / 8\n return target_visible, visible_ratio\n\n def _get_sequence_path(self, seq_id):\n return os.path.join(self.root, self.sequence_list[seq_id])\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible, visible_ratio = self._read_target_visible(seq_path)\n visible = visible & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def get_class_name(self, seq_id):\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n return obj_meta['object_class_name']\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n return frame_list, anno_frames, obj_meta" }, { "identifier": "TrackingNet", "path": "lib/train/dataset/tracking_net.py", "snippet": "class TrackingNet(BaseVideoDataset):\n \"\"\" TrackingNet dataset.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - The path to the TrackingNet folder, containing the training sets.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n sets (0 - 11) will be used.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().trackingnet_dir if root is None else root\n super().__init__('TrackingNet', root, image_loader)\n\n if set_ids is None:\n set_ids = [i for i in range(12)]\n\n self.set_ids = set_ids\n\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n # video_name for each sequence\n self.sequence_list = list_sequences(self.root, self.set_ids)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n # we do not have the class_lists for the tracking net\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def _load_class_info(self):\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n with open(class_map_path, 'r') as f:\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_to_class_map, seq_per_class\n\n def get_name(self):\n return 'trackingnet'\n\n def has_class_info(self):\n return True\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n bb_anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\n low_memory=False).values\n return torch.tensor(gt)\n\n def get_sequence_info(self, seq_id):\n bbox = self._read_bb_anno(seq_id)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = valid.clone().byte()\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, seq_id, frame_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\n return self.image_loader(frame_path)\n\n def _get_class(self, seq_id):\n seq_name = self.sequence_list[seq_id][1]\n return self.seq_to_class_map[seq_name]\n\n def get_class_name(self, seq_id):\n obj_class = self._get_class(seq_id)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n obj_class = self._get_class(seq_id)\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "ImagenetVID", "path": "lib/train/dataset/imagenetvid.py", "snippet": "class ImagenetVID(BaseVideoDataset):\n \"\"\" Imagenet VID dataset.\n\n Publication:\n ImageNet Large Scale Visual Recognition Challenge\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n Aditya Khosla, Michael Bernstein, Alexander C. 
Berg and Li Fei-Fei\n IJCV, 2015\n https://arxiv.org/pdf/1409.0575.pdf\n\n Download the dataset from http://image-net.org/\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n \"\"\"\n args:\n root - path to the imagenet vid dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n min_length - Minimum allowed sequence length.\n max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\n which cover complete image.\n \"\"\"\n root = env_settings().imagenet_dir if root is None else root\n super().__init__(\"imagenetvid\", root, image_loader)\n\n cache_file = os.path.join(root, 'cache.json')\n if os.path.isfile(cache_file):\n # If available, load the pre-processed cache file containing meta-info for each sequence\n with open(cache_file, 'r') as f:\n sequence_list_dict = json.load(f)\n\n self.sequence_list = sequence_list_dict\n else:\n # Else process the imagenet annotations and generate the cache file\n self.sequence_list = self._process_anno(root)\n\n with open(cache_file, 'w') as f:\n json.dump(self.sequence_list, f)\n\n # Filter the sequences based on min_length and max_target_area in the first frame\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n get_target_to_image_ratio(x) < max_target_area]\n\n def get_name(self):\n return 'imagenetvid'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, sequence, frame_id):\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n frame_number = frame_id + sequence['start_frame']\n frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,\n '{:06d}.JPEG'.format(frame_number))\n return self.image_loader(frame_path)\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n sequence = self.sequence_list[seq_id]\n\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n # Create anno dict\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n # added the class info to the meta info\n object_meta = OrderedDict({'object_class': sequence['class_name'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta\n\n def _process_anno(self, root):\n # Builds individual tracklets\n base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train')\n\n all_sequences = []\n for set in sorted(os.listdir(base_vid_anno_path)):\n set_id = int(set.split('_')[-1])\n for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):\n\n vid_id = int(vid.split('_')[-1])\n anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))\n\n frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))\n image_size = [int(frame1_anno.find('size/width').text), 
int(frame1_anno.find('size/height').text)]\n\n objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')\n for f in anno_files]\n\n tracklets = {}\n\n # Find all tracklets along with start frame\n for f_id, all_targets in enumerate(objects):\n for target in all_targets:\n tracklet_id = target.find('trackid').text\n if tracklet_id not in tracklets:\n tracklets[tracklet_id] = f_id\n\n for tracklet_id, tracklet_start in tracklets.items():\n tracklet_anno = []\n target_visible = []\n class_name_id = None\n\n for f_id in range(tracklet_start, len(objects)):\n found = False\n for target in objects[f_id]:\n if target.find('trackid').text == tracklet_id:\n if not class_name_id:\n class_name_id = target.find('name').text\n x1 = int(target.find('bndbox/xmin').text)\n y1 = int(target.find('bndbox/ymin').text)\n x2 = int(target.find('bndbox/xmax').text)\n y2 = int(target.find('bndbox/ymax').text)\n\n tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])\n target_visible.append(target.find('occluded').text == '0')\n\n found = True\n break\n if not found:\n break\n\n new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,\n 'start_frame': tracklet_start, 'anno': tracklet_anno,\n 'target_visible': target_visible, 'image_size': image_size}\n all_sequences.append(new_sequence)\n\n return all_sequences" }, { "identifier": "MSCOCOSeq", "path": "lib/train/dataset/coco_seq.py", "snippet": "class MSCOCOSeq(BaseVideoDataset):\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n Publication:\n Microsoft COCO: Common Objects in Context.\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n ECCV, 2014\n https://arxiv.org/pdf/1405.0312.pdf\n\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n organized as follows.\n - coco_root\n - annotations\n - instances_train2014.json\n - instances_train2017.json\n - images\n - train2014\n - train2017\n\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n images will be used\n split - 'train' or 'val'.\n version - version of coco dataset (2014 or 2017)\n \"\"\"\n root = env_settings().coco_dir if root is None else root\n super().__init__('COCO', root, image_loader)\n\n self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n # Load the COCO set.\n self.coco_set = COCO(self.anno_path)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'coco'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''2021.1.3 To avoid too small bounding boxes. Here we change the threshold to 50 pixels'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n img = self.image_loader(os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # COCO is an image dataset. 
Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "BDD100K_Night", "path": "lib/train/dataset/bdd100k_night.py", "snippet": "class BDD100K_Night(BaseVideoDataset):\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None):\n root = env_settings().bdd100k_dir if root is None else root\n super().__init__('bdd100k_night', root, image_loader)\n\n self.img_pth = os.path.join(root, 'images/')\n self.anno_path = os.path.join(root, 'annotations/bdd100k_night.json')\n\n # load dataset\n self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()\n self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)\n if not self.anno_path == None:\n print('loading annotations into memory...')\n tic = time.time()\n with open(self.anno_path, 'r') as f:\n dataset = json.load(f)\n print('Done (t={:0.2f}s)'.format(time.time()- tic))\n self.dataset = dataset\n self.sequence_list = self._get_sequence_list()\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n\n #得到序列\n def _get_sequence_list(self):\n anns = {}\n for picture in self.dataset:\n for box in picture['labels']:\n anns[box['id']] = box\n anns[box['id']]['name'] = picture['name']\n self.anns = anns\n\n #anns对应的是每一个框\n seq_list = list(anns.keys())\n\n return seq_list\n\n def _get_anno(self, seq_id):\n anno = self.anns[self.sequence_list[seq_id]]\n return anno\n\n\n #得到图片帧\n def _get_frames(self, seq_id):\n path = self.anns[self.sequence_list[seq_id]]['name']\n img = self.image_loader(os.path.join(self.img_pth, path))\n return img\n\n #得到每一帧的bounding box\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n x = anno['box2d']['x1']\n y = anno['box2d']['y1']\n width = anno['box2d']['x2'] - anno['box2d']['x1']\n height = anno['box2d']['y2'] - anno['box2d']['y1']\n\n bbox = torch.Tensor([x,y,width,height]).view(1, 4)\n\n '''v0.4 BDD100K_Night avoid too small bounding boxes'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def is_video_sequence(self):\n return False\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # BDD100K is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] 
for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta\n\n def get_name(self):\n return 'bdd100k_night'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.anns[self.sequence_list[seq_id]]['category']\n object_meta = OrderedDict({'object_class_name': cat_dict_current,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta" }, { "identifier": "SHIFT_Night", "path": "lib/train/dataset/shift_night.py", "snippet": "class SHIFT_Night(BaseVideoDataset):\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None):\n \"\"\"\n SHIFT_NIGHT Dataset\n \"\"\"\n root = env_settings().shift_dir if root is None else root\n super().__init__('shift_night', root, image_loader)\n\n sequence_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n sequence_path = os.path.join(sequence_path, 'data_specs', 'shift_info_1fps.json')\n with open(sequence_path, 'r') as f:\n info = json.load(f)\n self.info = info\n\n self.sequence_list = self._build_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n def _build_sequence_list(self):\n sequence_list = [sequence for sequence in self.info.keys()]\n return sequence_list\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n video_name = seq_name.split('/')[0]\n return os.path.join(self.root, video_name), seq_name\n\n def _get_frame_path(self, seq_path, seq_name, frame_id):\n frame = self.info[seq_name]['frame'][frame_id]\n return os.path.join(seq_path, frame) # frames extracted from info.json\n\n def _get_frame(self, seq_path, seq_name, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, seq_name, frame_id))\n\n def _read_bb_anno(self, seq_path, seq_name):\n bbox_all = []\n for bbox in self.info[seq_name]['box2d']:\n x = bbox['x1']\n y = bbox['y1']\n width = bbox['x2'] - bbox['x1']\n height = bbox['y2'] - bbox['y1']\n bbox_np = np.array([[x,y,width,height]])\n bbox_all.append(bbox_np)\n bbox_all_np = np.concatenate([bbox for bbox in bbox_all],axis=0)\n return torch.tensor(bbox_all_np)\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n seq_path, seq_name = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path, seq_name)\n\n '''v0.4 Shift avoid too small bounding boxes'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def get_name(self):\n return 'shift_night'\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path, seq_name = self._get_sequence_path(seq_id)\n\n frame_list = [self._get_frame(seq_path, seq_name, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': self.info[seq_name]['category'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { 
"identifier": "ExDark", "path": "lib/train/dataset/exdark.py", "snippet": "class ExDark(BaseVideoDataset):\n \"\"\" The ExDark dataset. ExDark is an image dataset. Thus, we treat each image as a sequence of length 1.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. If None, all the\n images will be used\n split - 'train' or 'val'.\n \"\"\"\n root = env_settings().exdark_dir if root is None else root\n super().__init__('exdark', root, image_loader)\n\n self.img_pth = os.path.join(root, 'images/')\n self.anno_path = os.path.join(root, 'annotations/annotations.json')\n\n # Load the COCO set.\n self.coco_set = COCO(self.anno_path)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'exdark'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''v0.4 ExDark avoid too small bounding boxes'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n img = self.image_loader(os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 
'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # ExDark is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "Got10k_lmdb", "path": "lib/train/dataset/got10k_lmdb.py", "snippet": "class Got10k_lmdb(BaseVideoDataset):\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n not NOT the official got-10k validation split. To use the official validation split, provide that as\n the root folder instead.\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n options can be used at the same time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n use_lmdb - whether the dataset is stored in lmdb format\n \"\"\"\n root = env_settings().got10k_lmdb_dir if root is None else root\n super().__init__('GOT10k_lmdb', root, image_loader)\n\n # all folders inside the root\n self.sequence_list = self._get_sequence_list()\n\n # seq_id is the index of the folder inside the got10k root path\n if split is not None:\n if seq_ids is not None:\n raise ValueError('Cannot set both split_name and seq_ids.')\n train_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_split.txt')\n elif split == 'val':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_val_split.txt')\n elif split == 'train_full':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_full_split.txt')\n elif split == 'vottrain':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_train_split.txt')\n elif split == 'votval':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_val_split.txt')\n else:\n raise ValueError('Unknown split name.')\n seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n elif seq_ids is None:\n seq_ids = list(range(0, len(self.sequence_list)))\n\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.sequence_meta_info = self._load_meta_info()\n self.seq_per_class = self._build_seq_per_class()\n\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def get_name(self):\n return 'got10k_lmdb'\n\n def 
has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def _load_meta_info(self):\n def _read_meta(meta_info):\n\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1],\n 'motion_class': meta_info[6].split(': ')[-1],\n 'major_class': meta_info[7].split(': ')[-1],\n 'root_class': meta_info[8].split(': ')[-1],\n 'motion_adverb': meta_info[9].split(': ')[-1]})\n\n return object_meta\n sequence_meta_info = {}\n for s in self.sequence_list:\n try:\n meta_str = decode_str(self.root, \"train/%s/meta_info.ini\" %s)\n sequence_meta_info[s] = _read_meta(meta_str.split('\\n'))\n except:\n sequence_meta_info[s] = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return sequence_meta_info\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n\n for i, s in enumerate(self.sequence_list):\n object_class = self.sequence_meta_info[s]['object_class_name']\n if object_class in seq_per_class:\n seq_per_class[object_class].append(i)\n else:\n seq_per_class[object_class] = [i]\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _get_sequence_list(self):\n dir_str = decode_str(self.root, 'train/list.txt')\n dir_list = dir_str.split('\\n')\n return dir_list\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line in got10k is empty\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n\n return torch.tensor(gt_arr)\n\n def _read_target_visible(self, seq_path):\n # full occlusion and out_of_view files\n occlusion_file = os.path.join(seq_path, \"absence.label\")\n cover_file = os.path.join(seq_path, \"cover.label\")\n # Read these files\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split('\\n')[:-1])) # the last line in got10k is empty\n occlusion = torch.ByteTensor(occ_list)\n cover_list = list(map(int, decode_str(self.root, cover_file).split('\\n')[:-1])) # the last line in got10k is empty\n cover = torch.ByteTensor(cover_list)\n\n target_visible = ~occlusion & (cover>0).byte()\n\n visible_ratio = cover.float() / 8\n return target_visible, visible_ratio\n\n def _get_sequence_path(self, seq_id):\n return os.path.join(\"train\", self.sequence_list[seq_id])\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible, visible_ratio = self._read_target_visible(seq_path)\n visible = visible & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n def get_class_name(self, seq_id):\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n return obj_meta['object_class_name']\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = 
self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n return frame_list, anno_frames, obj_meta" }, { "identifier": "Lasot_lmdb", "path": "lib/train/dataset/lasot_lmdb.py", "snippet": "class Lasot_lmdb(BaseVideoDataset):\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_lmdb_dir if root is None else root\n super().__init__('LaSOT_lmdb', root, image_loader)\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list]\n self.class_list = []\n for ele in class_list:\n if ele not in self.class_list:\n self.class_list.append(ele)\n # Keep a list of all classes\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot_lmdb'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line is empty\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n return torch.tensor(gt_arr)\n\n def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, 
\"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split(',')))\n occlusion = torch.ByteTensor(occ_list)\n out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(',')))\n out_of_view = torch.ByteTensor(out_view_list)\n\n target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "ImagenetVID_lmdb", "path": "lib/train/dataset/imagenetvid_lmdb.py", "snippet": "class ImagenetVID_lmdb(BaseVideoDataset):\n \"\"\" Imagenet VID dataset.\n\n Publication:\n ImageNet Large Scale Visual Recognition Challenge\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\n IJCV, 2015\n https://arxiv.org/pdf/1409.0575.pdf\n\n Download the dataset from http://image-net.org/\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n \"\"\"\n args:\n root - path to the imagenet vid dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n min_length - Minimum allowed sequence length.\n max_target_area - max allowed ratio between target area and image area. 
Can be used to filter out targets\n which cover complete image.\n \"\"\"\n root = env_settings().imagenet_dir if root is None else root\n super().__init__(\"imagenetvid_lmdb\", root, image_loader)\n\n sequence_list_dict = decode_json(root, \"cache.json\")\n self.sequence_list = sequence_list_dict\n\n # Filter the sequences based on min_length and max_target_area in the first frame\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n get_target_to_image_ratio(x) < max_target_area]\n\n def get_name(self):\n return 'imagenetvid_lmdb'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, sequence, frame_id):\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n frame_number = frame_id + sequence['start_frame']\n frame_path = os.path.join('Data', 'VID', 'train', set_name, vid_name,\n '{:06d}.JPEG'.format(frame_number))\n return decode_img(self.root, frame_path)\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n sequence = self.sequence_list[seq_id]\n\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n # Create anno dict\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n # added the class info to the meta info\n object_meta = OrderedDict({'object_class': sequence['class_name'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "MSCOCOSeq_lmdb", "path": "lib/train/dataset/coco_seq_lmdb.py", "snippet": "class MSCOCOSeq_lmdb(BaseVideoDataset):\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n Publication:\n Microsoft COCO: Common Objects in Context.\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n ECCV, 2014\n https://arxiv.org/pdf/1405.0312.pdf\n\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n organized as follows.\n - coco_root\n - annotations\n - instances_train2014.json\n - instances_train2017.json\n - images\n - train2014\n - train2017\n\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n images will be used\n split - 'train' or 'val'.\n version - version of coco dataset (2014 or 2017)\n \"\"\"\n root = env_settings().coco_dir if root is None else root\n super().__init__('COCO_lmdb', root, image_loader)\n self.root = root\n self.img_pth = 'images/{}{}/'.format(split, version)\n self.anno_path = 'annotations/instances_{}{}.json'.format(split, version)\n\n # Load the COCO set.\n print('loading annotations into memory...')\n tic = time.time()\n coco_json = decode_json(root, self.anno_path)\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\n\n self.coco_set = COCO(coco_json)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'coco_lmdb'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n # img = self.image_loader(os.path.join(self.img_pth, path))\n img = decode_img(self.root, os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "TrackingNet_lmdb", "path": "lib/train/dataset/tracking_net_lmdb.py", "snippet": "class TrackingNet_lmdb(BaseVideoDataset):\n \"\"\" TrackingNet dataset.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - The path to the TrackingNet folder, containing the training sets.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n sets (0 - 11) will be used.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().trackingnet_lmdb_dir if root is None else root\n super().__init__('TrackingNet_lmdb', root, image_loader)\n\n if set_ids is None:\n set_ids = [i for i in range(12)]\n\n self.set_ids = set_ids\n\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n # video_name for each sequence\n self.sequence_list = list_sequences(self.root)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n # we do not have the class_lists for the tracking net\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def _load_class_info(self):\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n with open(class_map_path, 'r') as f:\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_to_class_map, seq_per_class\n\n def get_name(self):\n return 'trackingnet_lmdb'\n\n def has_class_info(self):\n return True\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n gt_str_list = decode_str(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n os.path.join(\"anno\", vid_name + \".txt\")).split('\\n')[:-1]\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n return torch.tensor(gt_arr)\n\n def get_sequence_info(self, seq_id):\n bbox = self._read_bb_anno(seq_id)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = valid.clone().byte()\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, seq_id, frame_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n return decode_img(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n os.path.join(\"frames\", vid_name, str(frame_id) + \".jpg\"))\n\n def _get_class(self, seq_id):\n seq_name = self.sequence_list[seq_id][1]\n return self.seq_to_class_map[seq_name]\n\n def get_class_name(self, seq_id):\n obj_class = self._get_class(seq_id)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n obj_class = self._get_class(seq_id)\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "sampler", "path": "lib/train/data/sampler.py", "snippet": "def no_processing(data):\n def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\n train_cls=False, pos_prob=0.5):\n def __len__(self):\n def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None,\n allow_invisible=False, force_invisible=False):\n def __getitem__(self, index):\n def getitem(self):\n def getitem_cls(self):\n def get_center_box(self, H, W, 
ratio=1/8):\n def sample_seq_from_dataset(self, dataset, is_video_dataset):\n def get_one_search(self):\n def get_frame_ids_trident(self, visible):\n def get_frame_ids_stark(self, visible, valid):\nclass TrackingSampler(torch.utils.data.Dataset):\n H, W, _ = template_frames[0].shape\n H, W, _ = template_frames[0].shape\n H, W, _ = search_frames[0].shape" }, { "identifier": "processing", "path": "lib/train/data/processing.py", "snippet": "def stack_tensors(x):\n def __init__(self, transform=transforms.ToTensor(), template_transform=None, search_transform=None, joint_transform=None):\n def __call__(self, data: TensorDict):\n def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,\n mode='pair', settings=None, *args, **kwargs):\n def _get_jittered_box(self, box, mode):\n def __call__(self, data: TensorDict):\nclass BaseProcessing:\nclass STARKProcessing(BaseProcessing):" }, { "identifier": "LTRLoader", "path": "lib/train/data/loader.py", "snippet": "class LTRLoader(torch.utils.data.dataloader.DataLoader):\n \"\"\"\n Data loader. Combines a dataset and a sampler, and provides\n single- or multi-process iterators over the dataset.\n\n Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to\n select along which dimension the data should be stacked to form a batch.\n\n Arguments:\n dataset (Dataset): dataset from which to load the data.\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: False).\n sampler (Sampler, optional): defines the strategy to draw samples from\n the dataset. If specified, ``shuffle`` must be False.\n batch_sampler (Sampler, optional): like sampler, but returns a batch of\n indices at a time. Mutually exclusive with batch_size, shuffle,\n sampler, and drop_last.\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process.\n (default: 0)\n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n stack_dim (int): Dimension along which to stack to form the batch. (default: 0)\n pin_memory (bool, optional): If ``True``, the data loader will copy tensors\n into CUDA pinned memory before returning them.\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If ``False`` and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. (default: False)\n timeout (numeric, optional): if positive, the timeout value for collecting a batch\n from workers. Should always be non-negative. (default: 0)\n worker_init_fn (callable, optional): If not None, this will be called on each\n worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\n input, after seeding and before data loading. (default: None)\n\n .. note:: By default, each worker will have its PyTorch seed set to\n ``base_seed + worker_id``, where ``base_seed`` is a long generated\n by main process using its RNG. However, seeds for other libraries\n may be duplicated upon initializing workers (w.g., NumPy), causing\n each worker to return identical random numbers. (See\n :ref:`dataloader-workers-random-seed` section in FAQ.) 
You may\n use ``torch.initial_seed()`` to access the PyTorch seed for each\n worker in :attr:`worker_init_fn`, and use it to set other seeds\n before data loading.\n\n .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\n unpicklable object, e.g., a lambda function.\n \"\"\"\n\n __initialized = False\n\n def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None):\n if collate_fn is None:\n if stack_dim == 0:\n collate_fn = ltr_collate\n elif stack_dim == 1:\n collate_fn = ltr_collate_stack1\n else:\n raise ValueError('Stack dim no supported. Must be 0 or 1.')\n\n super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n num_workers, collate_fn, pin_memory, drop_last,\n timeout, worker_init_fn)\n\n self.name = name\n self.training = training\n self.epoch_interval = epoch_interval\n self.stack_dim = stack_dim" }, { "identifier": "opencv_loader", "path": "lib/train/data/image_loader.py", "snippet": "def opencv_loader(path):\n \"\"\" Read image using opencv's imread function and returns it in rgb format\"\"\"\n try:\n im = cv.imread(path, cv.IMREAD_COLOR)\n\n # convert to rgb and return\n return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n except Exception as e:\n print('ERROR: Could not read image \"{}\"'.format(path))\n print(e)\n return None" }, { "identifier": "is_main_process", "path": "lib/utils/misc.py", "snippet": "def is_main_process():\n return get_rank() == 0" } ]
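The LTRLoader docstring at the end of the context list above describes a stack_dim option for batch collation. The short sketch below is an illustration only (plain torch.stack, not code from the repository) of what that option selects between.

import torch

# Illustration (assumed, not from the repo): stacking a batch along dim 0 versus
# dim 1, which is what LTRLoader's stack_dim chooses between.
samples = [torch.randn(3, 4) for _ in range(5)]   # five samples of shape (3, 4)
batch_dim0 = torch.stack(samples, dim=0)          # shape (5, 3, 4), stack_dim=0
batch_dim1 = torch.stack(samples, dim=1)          # shape (3, 5, 4), stack_dim=1
print(batch_dim0.shape, batch_dim1.shape)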
import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet, BDD100K_Night, SHIFT_Night, ExDark from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.utils.misc import is_main_process
21560
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", "BDD100K_NIGHT", "SHIFT_NIGHT", "ExDark"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb") datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader)) else: datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader)) if name == "TRACKINGNET": if settings.use_lmdb: print("Building TrackingNet from lmdb") datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader)) else: # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB")
# datasets related
def update_settings(settings, cfg):
    settings.print_interval = cfg.TRAIN.PRINT_INTERVAL
    settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR}
    settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE}
    settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER}
    settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER}
    settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM
    settings.print_stats = None
    settings.batchsize = cfg.TRAIN.BATCH_SIZE
    settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE


def names2datasets(name_list: list, settings, image_loader):
    assert isinstance(name_list, list)
    datasets = []
    for name in name_list:
        assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val",
                        "COCO17", "VID", "TRACKINGNET", "BDD100K_NIGHT", "SHIFT_NIGHT", "ExDark"]
        if name == "LASOT":
            if settings.use_lmdb:
                print("Building lasot dataset from lmdb")
                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))
            else:
                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))
        if name == "GOT10K_vottrain":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))
        if name == "GOT10K_train_full":
            if settings.use_lmdb:
                print("Building got10k_train_full from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader))
        if name == "GOT10K_votval":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader))
        if name == "GOT10K_official_val":
            if settings.use_lmdb:
                raise ValueError("Not implement")
            else:
                datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader))
        if name == "COCO17":
            if settings.use_lmdb:
                print("Building COCO2017 from lmdb")
                datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader))
            else:
                datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader))
        if name == "VID":
            if settings.use_lmdb:
                print("Building VID from lmdb")
                datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader))
            else:
                datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader))
        if name == "TRACKINGNET":
            if settings.use_lmdb:
                print("Building TrackingNet from lmdb")
                datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader))
            else:
                # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB")
datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader))
2
2023-11-20 06:41:15+00:00
24k
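For context, a minimal usage sketch of the two helpers in the record above (update_settings and names2datasets) follows. It is not part of the dataset record: it assumes the repo's lib package is importable, that the snippet lives in the same module as those helpers, and that the dataset roots exist on disk; the SimpleNamespace settings object and the paths are illustrative stand-ins only.

from types import SimpleNamespace

from lib.train.data import opencv_loader

# Illustrative stand-in for the real settings object built by the training
# environment; only the attributes read by names2datasets are filled in.
settings = SimpleNamespace(
    use_lmdb=False,
    env=SimpleNamespace(
        lasot_dir="/data/lasot",            # assumed path, adjust to your setup
        got10k_dir="/data/got10k/train",    # assumed path, adjust to your setup
    ),
)

# Build the training datasets named in the config; the names must match the
# assert list inside names2datasets.
train_datasets = names2datasets(["LASOT", "GOT10K_vottrain"], settings, opencv_loader)
print([type(d).__name__ for d in train_datasets])  # e.g. ['Lasot', 'Got10k']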
shercoo/RGDiffSR
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for 
key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n # print('************************encoder shape',x.shape)\n\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n 
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = 
x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', 
to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n\n\n if conditioning is not None:\n if isinstance(conditioning, dict):\n if isinstance(list(conditioning.values())[0],list):\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\n else:\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps 
is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" }, { "identifier": "Attention_AR_counter", "path": "text_super_resolution/model/VisionLAN/utils.py", "snippet": "class Attention_AR_counter():\n def __init__(self, display_string, dict_file, case_sensitive):\n self.correct = 0\n self.total_samples = 0.\n self.distance_C = 0\n self.total_C = 0.\n self.distance_W = 0\n self.total_W = 0.\n self.display_string = display_string\n self.case_sensitive = case_sensitive\n self.de = cha_encdec(dict_file, case_sensitive)\n\n def clear(self):\n self.correct = 0\n self.total_samples = 0.\n self.distance_C = 0\n self.total_C = 0.\n self.distance_W = 0\n self.total_W = 0.\n \n def add_iter(self, output, out_length, label_length, labels):\n self.total_samples += label_length.size()[0]\n prdt_texts, prdt_prob = self.de.decode(output, out_length)\n for i in range(0, len(prdt_texts)):\n if not self.case_sensitive:\n prdt_texts[i] = prdt_texts[i].lower()\n labels[i] = labels[i].lower()\n all_words = []\n for w in labels[i].split('|') + prdt_texts[i].split('|'):\n if w not in all_words:\n all_words.append(w)\n l_words = [all_words.index(_) for _ in labels[i].split('|')]\n p_words = [all_words.index(_) for _ in prdt_texts[i].split('|')]\n self.distance_C += ed.eval(labels[i], prdt_texts[i])\n self.distance_W += ed.eval(l_words, p_words)\n self.total_C += len(labels[i])\n self.total_W += len(l_words)\n self.correct = self.correct + 1 if labels[i] == prdt_texts[i] else self.correct\n return prdt_texts, labels\n\n def show(self):\n print(self.display_string)\n if self.total_samples == 0:\n pass\n print('Accuracy: {:.6f}, AR: {:.6f}, CER: {:.6f}, WER: {:.6f}'.format(\n self.correct / self.total_samples,\n 1 - self.distance_C / self.total_C,\n self.distance_C / self.total_C,\n self.distance_W / self.total_W))\n self.clear()\n def show_test(self,best_acc, change= False):\n print(self.display_string)\n if self.total_samples == 0:\n pass\n if (self.correct / self.total_samples) > best_acc:\n best_acc = np.copy(self.correct / self.total_samples)\n change = True\n print('Accuracy: {:.6f}, AR: {:.6f}, CER: {:.6f}, WER: {:.6f}, best_acc: {:.6f}'.format(\n self.correct / self.total_samples,\n 1 - self.distance_C / self.total_C,\n self.distance_C / self.total_C,\n self.distance_W / self.total_W, best_acc))\n\n self.clear()\n return best_acc, change\n \n def convert(self, output, out_length):\n prdt_texts, prdt_prob = self.de.decode(output, out_length)\n prdt_prob = prdt_prob.cpu().unsqueeze(0)\n MAX_LEN = 25\n length = prdt_prob.size(1)\n if length >= MAX_LEN:\n return prdt_prob[:, :MAX_LEN, :], prdt_prob\n pad = torch.zeros([prdt_prob.shape[0], MAX_LEN - length, prdt_prob.shape[2]])\n prdt_prob = torch.cat([prdt_prob, pad], dim=1)\n return prdt_texts, prdt_prob" }, { "identifier": "TPSSpatialTransformer", "path": "text_super_resolution/model/tps_spatial_transformer.py", "snippet": "class TPSSpatialTransformer(nn.Module):\n\n def __init__(self, output_image_size=None, num_control_points=None, margins=None):\n super(TPSSpatialTransformer, self).__init__()\n self.output_image_size = output_image_size\n self.num_control_points = num_control_points\n self.margins = margins\n\n self.target_height, self.target_width = output_image_size\n target_control_points = build_output_control_points(num_control_points, 
margins)\n N = num_control_points\n # N = N - 4\n\n # create padded kernel matrix\n forward_kernel = torch.zeros(N + 3, N + 3)\n target_control_partial_repr = compute_partial_repr(target_control_points, target_control_points)\n forward_kernel[:N, :N].copy_(target_control_partial_repr)\n forward_kernel[:N, -3].fill_(1)\n forward_kernel[-3, :N].fill_(1)\n forward_kernel[:N, -2:].copy_(target_control_points)\n forward_kernel[-2:, :N].copy_(target_control_points.transpose(0, 1))\n # compute inverse matrix\n inverse_kernel = torch.inverse(forward_kernel)\n\n # create target cordinate matrix\n HW = self.target_height * self.target_width\n target_coordinate = list(itertools.product(range(self.target_height), range(self.target_width)))\n target_coordinate = torch.Tensor(target_coordinate) # HW x 2\n Y, X = target_coordinate.split(1, dim = 1)\n Y = Y / (self.target_height - 1)\n X = X / (self.target_width - 1)\n target_coordinate = torch.cat([X, Y], dim = 1) # convert from (y, x) to (x, y)\n target_coordinate_partial_repr = compute_partial_repr(target_coordinate, target_control_points)\n target_coordinate_repr = torch.cat([\n target_coordinate_partial_repr, torch.ones(HW, 1), target_coordinate\n ], dim = 1)\n\n # register precomputed matrices\n self.register_buffer('inverse_kernel', inverse_kernel)\n self.register_buffer('padding_matrix', torch.zeros(3, 2))\n self.register_buffer('target_coordinate_repr', target_coordinate_repr)\n self.register_buffer('target_control_points', target_control_points)\n\n def forward(self, input, source_control_points):\n assert source_control_points.ndimension() == 3\n assert source_control_points.size(1) == self.num_control_points\n assert source_control_points.size(2) == 2\n batch_size = source_control_points.size(0)\n\n Y = torch.cat([source_control_points, self.padding_matrix.expand(batch_size, 3, 2)], 1)\n mapping_matrix = torch.matmul(self.inverse_kernel, Y)\n source_coordinate = torch.matmul(self.target_coordinate_repr, mapping_matrix)\n\n grid = source_coordinate.view(-1, self.target_height, self.target_width, 2)\n grid = torch.clamp(grid, 0, 1) # the source_control_points may be out of [0, 1].\n # the input to grid_sample is normalized [-1, 1], but what we get is [0, 1]\n grid = 2.0 * grid - 1.0\n output_maps = grid_sample(input, grid, canvas=None)\n return output_maps, source_coordinate" }, { "identifier": "STNHead", "path": "text_super_resolution/model/stn_head.py", "snippet": "class STNHead(nn.Module):\n def __init__(self, in_planes, num_ctrlpoints, activation='none', input_size=(16, 64)):\n super(STNHead, self).__init__()\n\n self.in_planes = in_planes\n self.num_ctrlpoints = num_ctrlpoints\n self.activation = activation\n self.stn_convnet = nn.Sequential(\n # conv3x3_block(in_planes, 32), # 32*128\n # nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(in_planes, 32), # 16*64\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(32, 64), # 8*32\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(64, 128), # 4*16\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(128, 256), # 2*8\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(256, 256), # 1*4,\n nn.MaxPool2d(kernel_size=(1,2), stride=(1,2)),\n conv3x3_block(256, 256)) # 1*2\n\n flatten_width = int(input_size[1] / 32)\n # print(\"flw:\", input_size[1] / 32)\n self.stn_fc1 = nn.Sequential(\n nn.Linear(512, 512), #flatten_width*256\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True))\n self.stn_fc2 = nn.Linear(512, num_ctrlpoints*2)\n\n self.init_weights(self.stn_convnet)\n 
self.init_weights(self.stn_fc1)\n self.init_stn(self.stn_fc2)\n\n def init_weights(self, module):\n for m in module.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.001)\n m.bias.data.zero_()\n\n def init_stn(self, stn_fc2):\n margin = 0.01\n sampling_num_per_side = int(self.num_ctrlpoints / 2)\n ctrl_pts_x = np.linspace(margin, 1.-margin, sampling_num_per_side)\n ctrl_pts_y_top = np.ones(sampling_num_per_side) * margin\n ctrl_pts_y_bottom = np.ones(sampling_num_per_side) * (1-margin)\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)\n ctrl_points = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0).astype(np.float32)\n # print(ctrl_points.shape)\n if self.activation is 'none':\n pass\n elif self.activation == 'sigmoid':\n ctrl_points = -np.log(1. / ctrl_points - 1.)\n elif self.activation == 'relu':\n ctrl_points = F.relu(torch.Tensor(ctrl_points))\n stn_fc2.weight.data.zero_()\n stn_fc2.bias.data = torch.Tensor(ctrl_points).view(-1)\n\n def forward(self, x):\n x = self.stn_convnet(x)\n batch_size, _, h, w = x.size()\n x = x.view(batch_size, -1)\n\n # print(\"x:\", x.shape)\n\n img_feat = self.stn_fc1(x)\n x = self.stn_fc2(0.1 * img_feat)\n if self.activation == 'sigmoid':\n x = torch.sigmoid(x)\n if self.activation == 'relu':\n x = F.relu(x)\n x = x.view(-1, self.num_ctrlpoints, 2)\n return img_feat, x" }, { "identifier": "VisionLAN", "path": "text_super_resolution/model/VisionLAN/VisionLAN.py", "snippet": "class VisionLAN(nn.Module):\n '''\n Architecture of VisionLAN\n input\n input: input image\n label_pos: character index\n output\n text_pre: word-level prediction from VRM\n test_rem: remaining string prediction from MLM\n text_mas: occluded character prediction from MLM\n '''\n def __init__(self, strides, input_shape):\n super(VisionLAN, self).__init__()\n self.backbone = resnet.resnet45(strides, compress_layer=False)\n self.input_shape = input_shape\n self.MLM_VRM = MLM_VRM()\n def forward(self, input, label_pos, training_stp, Train_in = True):\n # extract features\n features = self.backbone(input)\n # MLM + VRM\n if Train_in:\n text_pre, test_rem, text_mas, mask_map = self.MLM_VRM(features[-1], label_pos, training_stp, is_Train=Train_in)\n return text_pre, test_rem, text_mas, mask_map\n else:\n output, out_length = self.MLM_VRM(features[-1], label_pos, training_stp, is_Train=Train_in)\n return output, out_length" }, { "identifier": "SemanticLoss", "path": "text_super_resolution/loss/semantic_loss.py", "snippet": "class SemanticLoss(nn.Module):\n def __init__(self, margin=0.1):\n super(SemanticLoss, self).__init__()\n self.cos_sim = nn.CosineSimilarity(dim=-1, eps=1e-8)\n self.margin = margin\n\n self.lambda1 = 1.0\n self.lambda2 = 1.0\n\n self.kl_loss = torch.nn.KLDivLoss()\n\n def forward(self, pred_vec, gt_vec):\n # pred_vec: [N, C]\n # gt_vec: [N, C]\n # mean_sim = torch.mean(self.cos_sim(gt_vec, pred_vec))\n # sim_loss = 1 - mean_sim\n \n #noise = Variable(torch.rand(pred_vec.shape)) * 0.1 - 0.05\n\n #normed_pred_vec = pred_vec + noise.to(pred_vec.device)\n # print(\"pred_vec:\", pred_vec.shape)\n norm_vec = torch.abs(gt_vec - pred_vec)\n margin_loss = torch.mean(norm_vec) #\n\n # pr int(\"sem_loss:\", 
float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n ce_loss = self.kl_loss(torch.log(pred_vec + 1e-20), gt_vec + 1e-20)\n # print(\"sem_loss:\", float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n\n return self.lambda1 * margin_loss + self.lambda2 * ce_loss# ce_loss #margin_loss # + ce_loss # + sim_loss #margin_loss +\n\n def cross_entropy(self, pred_vec, gt_vec, l=1e-5):\n cal = gt_vec * torch.log(pred_vec+l) + (1 - gt_vec) * torch.log(1 - pred_vec+l)\n #print(\"cal:\", cal)\n return -cal" }, { "identifier": "ssim_psnr", "path": "text_super_resolution/utils/ssim_psnr.py", "snippet": "def calculate_psnr(img1, img2):\ndef weighted_calculate_psnr(img1, img2, weighted_mask):\ndef gaussian(window_size, sigma):\ndef create_window(window_size, channel):\ndef create_rect_window(window_H, window_W, channel):\ndef _ssim_weighted(img1_, img2_, window, window_size, channel, weighted_mask, size_average=True):\ndef _ssim(img1, img2, window, window_size, channel, size_average=True):\ndef _tri_ssim(img1, img2, img3, window, window_size, channel, size_average=True):\ndef _ssim_rect(img1, img2, window, window_size, channel, size_average=True):\n def __init__(self, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, img3):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, weighted_mask):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\ndef ssim(img1, img2, window_size=11, size_average=True):\ndef ssim_weighted(img1, img2, weighted_mask, window_size=11, size_average=True):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n H, W = window_size\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\nclass Distorted_SSIM(torch.nn.Module):\nclass SSIM(torch.nn.Module):\nclass TRI_SSIM(torch.nn.Module):\nclass SSIM_WEIGHTED(torch.nn.Module):\nclass SSIM_TSR(torch.nn.Module):" } ]
import datetime
import math
import cv2
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import pygame
from collections import OrderedDict
from matplotlib import pyplot as plt
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from torchvision import transforms
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from text_super_resolution.model.VisionLAN.utils import Attention_AR_counter
from text_super_resolution.model.tps_spatial_transformer import TPSSpatialTransformer
from text_super_resolution.model.stn_head import STNHead
from text_super_resolution.model.VisionLAN.VisionLAN import VisionLAN
from utils.render_standard_text import *
from text_super_resolution.loss.semantic_loss import SemanticLoss
from text_super_resolution.utils import ssim_psnr
from pygame import freetype
from utils.metrics import *
15,162
                cond = [cond]
            key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
            cond = {key: cond}

        if hasattr(self, "split_input_params"):
            assert len(cond) == 1  # todo can only deal with one conditioning atm
            assert not return_ids
            ks = self.split_input_params["ks"]  # eg. (128, 128)
            stride = self.split_input_params["stride"]  # eg. (64, 64)

            h, w = x_noisy.shape[-2:]

            if ks[0] > h or ks[1] > w:
                ks = (min(ks[0], h), min(ks[1], w))
                # print("reducing Kernel")

            if stride[0] > h or stride[1] > w:
                stride = (min(stride[0], h), min(stride[1], w))
                # print("reducing stride")
            # print('ddpm','x_noisy shape',x_noisy.shape,'ks',ks,'stride',stride)
            fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)

            z = unfold(x_noisy)  # (bn, nc * prod(**ks), L)
            # Reshape to img shape
            z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )
            z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]

            if self.cond_stage_key in ["image", "LR_image", "segmentation",
                                       'bbox_img'] and self.model.conditioning_key:  # todo check for completeness
                c_key = next(iter(cond.keys()))  # get key
                c = next(iter(cond.values()))  # get value
                assert (len(c) == 1)  # todo extend to list with more than one elem
                c = c[0]  # get element

                c = unfold(c)
                c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

                cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]

            elif self.cond_stage_key == 'coordinates_bbox':
                assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size'

                # assuming padding of unfold is always 0 and its dilation is always 1
                n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
                full_img_h, full_img_w = self.split_input_params['original_image_size']
                # as we are operating on latents, we need the factor from the original image size to the
                # spatial latent size to properly rescale the crops for regenerating the bbox annotations
                num_downs = self.first_stage_model.encoder.num_resolutions - 1
                rescale_latent = 2 ** (num_downs)

                # get top left postions of patches as conforming for the bbbox tokenizer, therefore we
                # need to rescale the tl patch coordinates to be in between (0,1)
                tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
                                         rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
                                        for patch_nr in range(z.shape[-1])]

                # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
                patch_limits = [(x_tl, y_tl,
                                 rescale_latent * ks[0] / full_img_w,
                                 rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
                # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]

                # tokenize crop coordinates for the bounding boxes of the respective patches
                patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
                                      for bbox in patch_limits]  # list of length l with tensors of shape (1, 2)
                print(patch_limits_tknzd[0].shape)
                # cut tknzd crop position from conditioning
                assert isinstance(cond, dict), 'cond must be dict to be fed into model'
                cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
                print(cut_cond.shape)

                adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
                adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
                print(adapted_cond.shape)
                adapted_cond = self.get_learned_conditioning(adapted_cond)
                print(adapted_cond.shape)
                adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
                print(adapted_cond.shape)

                cond_list = [{'c_crossattn': [e]} for e in adapted_cond]

            else:
                cond_list = [cond for i in range(z.shape[-1])]  # Todo make this more efficient

            # apply model by loop over crops
            output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
            assert not isinstance(output_list[0], tuple)  # todo cant deal with multiple model outputs check this never happens

            o = torch.stack(output_list, axis=-1)
            o = o * weighting
            # Reverse reshape to img shape
            o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
            # stitch crops together
            x_recon = fold(o) / normalization

        else:
            x_recon = self.model(x_noisy, t, **cond)

        if isinstance(x_recon, tuple) and not return_ids:
            return x_recon[0]
        else:
            return x_recon

    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
               extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)

    def _prior_bpd(self, x_start):
        """
        Get the prior KL term for the variational lower-bound, measured in bits-per-dim.
        This term can't be optimized, as it only depends on the encoder.
        :param x_start: the [N x C x ...] tensor of inputs.
        :return: a batch of [N] KL values (in bits), one per batch element.
        """
        batch_size = x_start.shape[0]
        t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
        qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} sem_loss = SemanticLoss() def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") print(sd.keys()) print(sd['epoch']) print(sd['global_step']) print(sd['callbacks']) # print(sd['optimizer_states']) # print(sd['lr_schedulers']) # print(sd['state_dict'].keys()) # exit(0) if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): # print('************************fuck',k) x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict 
= self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): # print('******************************in validation') _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, text_prior_enable=False, image_height=32, image_width=128, STN_enable=False, standard_text=False, VL_pretrained_path=None, fid_eval=False, visualize=False, down_sample_rate=2, recog_loss_enable=False, font_path=None, *args, **kwargs): self.fid_eval = fid_eval self.visualize = visualize self.text_prior_enable = text_prior_enable self.recog_loss_enable = recog_loss_enable self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config 
== '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.image_height = image_height self.image_width = image_width self.stn = STN_enable if self.stn: self.tps_inputsize = [image_height // down_sample_rate, image_width // down_sample_rate] tps_outputsize = [image_height // down_sample_rate, image_width // down_sample_rate] num_control_points = 20 tps_margins = [0.05, 0.05] self.tps = TPSSpatialTransformer( output_image_size=tuple(tps_outputsize), num_control_points=num_control_points, margins=tuple(tps_margins)) self.stn_head = STNHead( in_planes=3, num_ctrlpoints=num_control_points, activation='none', input_size=self.tps_inputsize) self.standard_text = standard_text if self.standard_text: # self.VL_model = self.VisionLAN_init(VL_pretrained_path) # self.test_acc_counter = Attention_AR_counter('\ntest accuracy: ', # '/home/zhouyuxuan/latent-diffusion/dic_36.txt', False) self.font_path = font_path pygame.init() freetype.init() self.cal_psnr = ssim_psnr.calculate_psnr self.cal_ssim = ssim_psnr.SSIM() def VisionLAN_init(self, path=None): cfg = {'args': { 'strides': [(1, 1), (2, 2), (2, 2), (2, 2), (1, 1), (1, 1)], 'input_shape': [3, 64, 256], # C x H x W }, 'init_state_dict': '/home/zhouyuxuan/latent-diffusion/visionlan.pth', } model_VL = VisionLAN(**cfg['args']) model_path = cfg['init_state_dict'] if path is None else path print('load pre_trained VisionLAN model from %s' % model_path) model_VL = model_VL.to(self.device) model_VL = nn.DataParallel(model_VL) if cfg['init_state_dict'] != None: fe_state_dict_ori = torch.load(model_path) fe_state_dict = OrderedDict() for k, v in fe_state_dict_ori.items(): if 'module' not in k: k = 'module.' 
+ k else: k = k.replace('features.module.', 'module.features.') fe_state_dict[k] = v model_dict_fe = model_VL.state_dict() state_dict_fe = {k: v for k, v in fe_state_dict.items() if k in model_dict_fe.keys()} model_dict_fe.update(state_dict_fe) model_VL.load_state_dict(model_dict_fe) return model_VL def parse_visionlan_data(self, imgs_input): imgs_input = transforms.ToPILImage()(imgs_input).convert('RGB') imgs_input = cv2.resize(np.array(imgs_input), (256, 64)) imgs_input = transforms.ToTensor()(imgs_input).unsqueeze(0) imgs_input = imgs_input.to(self.device) return imgs_input def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids def on_save_checkpoint(self, checkpoint): if not isinstance(self.cond_stage_model, torch.nn.Identity): self.cond_stage_model.save_state_dict( '/home/zhouyuxuan/latent-diffusion/crnn_ckpt/', self.current_epoch) @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, 
n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # print(x.shape) # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[1] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = 
self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) # print('weighting',weighting.shape,Ly,Lx) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[1] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: # if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) if self.text_prior_enable: c = self.get_additional_cond(xc, c) # c = {'c_concat': [xc], 'c_crossattn': [c]} else: c = xc if bs is not None: if isinstance(c, dict): for k, v in c.items(): c[k] = [v[0][:bs]] else: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] # print('fuck',c.shape) if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape print('decode z shape', z.shape) if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") print(ks, stride, uf) fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape print('encode x shape', x.shape) print('ks', ks, 'stride', stride, 'df', df) if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) print('encode z shape', z.shape) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def on_validation_start(self) -> None: print(f'******************************in validation {self.current_epoch}') def validation_step(self, batch, batch_idx): # print('******************************in validation') _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) if self.fid_eval and self.current_epoch % 10 == 0: results = self.recognize_sample(batch, N=114514, inpaint=False) rec_image = results['samples'] target = batch[self.first_stage_key] target = rearrange(target, 'b h w c -> b c h w') cond = batch[self.cond_stage_key] cond = rearrange(cond, 'b h w c -> b c h w') if self.visualize: batchlen = rec_image.shape[0] rc = int(math.sqrt(batchlen)) f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) plt.subplots_adjust(wspace=0, hspace=0) print(len(axs), batchlen, int(math.sqrt(batchlen))) assert len(axs) ** 2 == batchlen for i in range(batchlen): axs[i // rc, i % rc].set_xticklabels([]) axs[i // rc, i % rc].set_yticklabels([]) axs[i // rc, i % rc].set_aspect('equal') axs[i // rc, i % rc].imshow(rec_image[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/sample_{batch_idx}.jpg') plt.cla() f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) 
plt.subplots_adjust(wspace=0, hspace=0) for i in range(batchlen): axs[i // rc, i % rc].imshow(target[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/target_{batch_idx}.jpg') plt.cla() f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) plt.subplots_adjust(wspace=0, hspace=0) for i in range(batchlen): axs[i // rc, i % rc].imshow(cond[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/input_{batch_idx}.jpg') PSNR = self.cal_psnr(rec_image[:, :3], target[:, :3]) SSIM = self.cal_ssim(rec_image[:, :3], target[:, :3]) self.log_dict({'PSNR': PSNR, 'SSIM': SSIM}, prog_bar=False, logger=True, on_step=False, on_epoch=True) def shared_step(self, batch, **kwargs): # print('*******************************************************batch',batch['image'].shape) # print('*******************************************************batch',batch['image'].shape) # if hasattr(self, "split_input_params"): # print(self.split_input_params) # else: # print('fuck') x, c = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x, c) if self.recog_loss_enable: HR = batch['image'] HR = rearrange(HR, 'b h w c -> b c h w') HR = HR.to(memory_format=torch.contiguous_format).float() LR = c label_vecs = self.get_learned_conditioning(c).permute(1, 0, 2) label_vecs_hr = self.get_learned_conditioning(HR).permute(1, 0, 2) loss_recog_distill = sem_loss(label_vecs, label_vecs_hr) * 100 # 100 loss = loss + loss_recog_distill loss_dict.update({f'loss_recog': loss_recog_distill}) # return loss + loss_recog_distill, loss_dict # # else: return loss, loss_dict def get_additional_cond(self, c, tp): if self.stn: _, ctrl_points_c = self.stn_head(c) c, _ = self.tps(c, ctrl_points_c) if self.standard_text: x_q = torch.empty(1, 2, c.shape[2], c.shape[3]) # prob_lr = torch.empty(1, 25, 37) rec_results = get_string_crnn(tp.permute(1, 0, 2), False) for i in range(c.shape[0]): # visionlan_dict_lr = self.parse_visionlan_data(c[i, :3, :, :]) # target = '' # label_lr, label_length = self.VL_model(visionlan_dict_lr, target, '', False) # pred_str_lr, pred_prob = self.test_acc_counter.convert(label_lr, label_length) # s = pred_str_lr[0] # prob_lr = torch.cat([prob_lr, pred_prob], dim=0) s = rec_results[i] if s == "" or type(s) == torch.Tensor: s = "\t" lower_case = s.lower() upper_case = s.upper() i_t_lower = make_standard_text(self.font_path, lower_case, (c.shape[2], c.shape[3])) i_t_lower_tensor = torch.from_numpy(i_t_lower).unsqueeze(0).unsqueeze(0) i_t_upper = make_standard_text(self.font_path, upper_case, (c.shape[2], c.shape[3])) i_t_upper_tensor = torch.from_numpy(i_t_upper).unsqueeze(0).unsqueeze(0) i_t_tensor = torch.cat([i_t_lower_tensor, i_t_upper_tensor], dim=1) x_q = torch.cat([x_q, i_t_tensor], dim=0) x_q = x_q[1:] # prob_lr = prob_lr[1:] x_q = x_q.to(self.device) # prob_lr = prob_lr.to(self.device) c = torch.cat([c, x_q], dim=1) return {'c_concat': [c], 'c_crossattn': [tp]} def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.text_prior_enable and self.model.conditioning_key == 'hybrid': tp = self.get_learned_conditioning(c) c = self.get_additional_cond(c, tp) else: if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = 
self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) # print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) # print("reducing stride") # print('ddpm','x_noisy shape',x_noisy.shape,'ks',ks,'stride',stride) fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = 
[torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
next_line
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
gold_snippet_index
9
created_at
2023-11-20 06:34:21+00:00
level
24k
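Editorial aside on the diffusion code in the record that ends above: its q_sample method applies the closed-form forward-noising identity x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, with per-timestep coefficients gathered and broadcast over the image dimensions. The sketch below is a minimal, self-contained illustration of that identity only, not the record's implementation; the helper name extract, the tensor shapes, and the batch size are illustrative assumptions (the linear schedule endpoints 1e-4 and 2e-2 mirror the defaults visible in the record's register_schedule).

# Minimal sketch (assumptions noted above) of the closed-form DDPM forward step
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
import torch

T = 1000
betas = torch.linspace(1e-4, 2e-2, T)               # linear beta schedule (endpoints as in the record's defaults)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # alpha_bar_t
sqrt_ac = alphas_cumprod.sqrt()
sqrt_one_minus_ac = (1.0 - alphas_cumprod).sqrt()

def extract(coeffs: torch.Tensor, t: torch.Tensor, shape) -> torch.Tensor:
    # Gather one coefficient per batch element and reshape so it broadcasts
    # over the remaining (channel/spatial) dimensions.
    return coeffs.gather(0, t).reshape(t.shape[0], *((1,) * (len(shape) - 1)))

def q_sample(x_start: torch.Tensor, t: torch.Tensor, noise: torch.Tensor) -> torch.Tensor:
    return (extract(sqrt_ac, t, x_start.shape) * x_start
            + extract(sqrt_one_minus_ac, t, x_start.shape) * noise)

x0 = torch.randn(4, 3, 32, 128)                     # illustrative batch of image-like tensors
t = torch.randint(0, T, (4,))
xt = q_sample(x0, t, torch.randn_like(x0))
print(xt.shape)                                     # torch.Size([4, 3, 32, 128])

The broadcasting trick (reshaping the gathered coefficients to (B, 1, 1, 1)) is what lets a single schedule tensor serve arbitrary image shapes; the record's extract_into_tensor helper plays the same role.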
repo_name
microsoft/Project-BayesDAG
file_path
src/causica/models/imodel.py
[ { "identifier": "Dataset", "path": "src/causica/datasets/dataset.py", "snippet": "class Dataset(BaseDataset):\n \"\"\"\n Class to store dense train/val/test data and masks and variables metadata.\n Note that the data and masks provided by this class are read only.\n \"\"\"\n\n def __init__(\n self,\n train_data: np.ndarray,\n train_mask: np.ndarray,\n val_data: Optional[np.ndarray] = None,\n val_mask: Optional[np.ndarray] = None,\n test_data: Optional[np.ndarray] = None,\n test_mask: Optional[np.ndarray] = None,\n variables: Optional[Variables] = None,\n data_split: Optional[Dict[str, Any]] = None,\n held_out_interventions: Optional[Dict[str, Any]]=None,\n true_posterior: Optional[Any]=None,\n graph_args: Optional[Dict[str, Any]]=None\n ) -> None:\n super().__init__(train_data, train_mask, val_data, val_mask, test_data, test_mask, variables, data_split, held_out_interventions, true_posterior, graph_args)\n\n # Ensure that data and masks are immutable\n if not issparse(self._train_data):\n self._train_data.setflags(write=False)\n self._train_mask.setflags(write=False)\n if test_data is not None and not issparse(test_data):\n self._test_data = cast(np.ndarray, test_data)\n self._test_data.setflags(write=False)\n self._test_mask = cast(np.ndarray, test_mask)\n self._test_mask.setflags(write=False)\n\n if val_data is not None and not issparse(val_data):\n self._val_data = cast(np.ndarray, val_data)\n self._val_mask = cast(np.ndarray, val_mask)\n self._val_data.setflags(write=False)\n self._val_mask.setflags(write=False)\n\n def to_causal(\n self,\n adjacency_data: Optional[np.ndarray],\n subgraph_data: Optional[np.ndarray],\n intervention_data: Optional[List[InterventionData]],\n counterfactual_data: Optional[List[InterventionData]] = None,\n ):\n \"\"\"\n Return the dag version of this dataset.\n \"\"\"\n return CausalDataset(\n train_data=self._train_data,\n train_mask=self._train_mask,\n adjacency_data=adjacency_data,\n subgraph_data=subgraph_data,\n intervention_data=intervention_data,\n counterfactual_data=counterfactual_data,\n val_data=self._val_data,\n val_mask=self._val_mask,\n test_data=self._test_data,\n test_mask=self._test_mask,\n variables=self._variables,\n data_split=self._data_split,\n held_out_interventions=self._held_out_interventions,\n true_posterior=self._true_posterior,\n graph_args=self._graph_args\n )\n\n @property\n def train_data_and_mask(self) -> Tuple[np.ndarray, np.ndarray]:\n # Add to avoid inconsistent type mypy error\n return self._train_data, self._train_mask" }, { "identifier": "Variables", "path": "src/causica/datasets/variables.py", "snippet": "class Variables:\n \"\"\"\n This class represents any variables present in a model.\n \"\"\"\n\n def __init__(\n self,\n variables: List[Variable],\n auxiliary_variables: Optional[List[Variable]] = None,\n used_cols: Optional[List[int]] = None,\n ) -> None:\n \"\"\"\n Args:\n variables: A list Variable objects.\n auxiliary_variables: A list of Variable objects only used for input into VAE,\n not produced in output.\n These are assumed to be appended onto the end of the variables in the data.\n Defaults to None - no aux variables present.\n used_cols: A list of column ids that were used when processing the original data.\n \"\"\"\n if not auxiliary_variables:\n auxiliary_variables = []\n self.auxiliary_variables = auxiliary_variables\n self._variables = variables\n\n self._deduplicate_names()\n\n # Dictionary mapping from variable name to variable index.\n self.name_to_idx = {var.name: idx for idx, var in 
enumerate(self._variables)}\n\n # Lists containing query and target variable indices\n self.target_var_idxs = []\n self.not_target_var_idxs = []\n self.query_var_idxs = []\n self.not_query_var_idxs = []\n for idx, var in enumerate(self._variables):\n if var.query:\n self.query_var_idxs.append(idx)\n else:\n self.not_query_var_idxs.append(idx)\n if var.target:\n self.target_var_idxs.append(idx)\n else:\n self.not_target_var_idxs.append(idx)\n\n if len(self.target_var_idxs) > 0 and all(idx in self.query_var_idxs for idx in self.target_var_idxs):\n warnings.warn(\n \"All target variables are marked as queriable, it is likely that active learning will always \"\n \"select these variables first.\"\n )\n\n # Lists containing continuous (including text) and binary/categorical variable indices\n self.var_idxs_by_type: DefaultDict[str, List[int]] = defaultdict(list)\n for idx, var in enumerate(self._variables + self.auxiliary_variables):\n self.var_idxs_by_type[var.type_].append(idx)\n\n # List of lists, where self.unprocessed_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data.\n self.unprocessed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.unprocessed_non_aux_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data (non-auxiliary).\n self.unprocessed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.processed_cols[i] gives the columns occupied by the ith variable in the processed\n # data.\n self.processed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.processed_dim\n self.processed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.processed_non_aux_cols[i] gives the columns occupied by the ith variable in the processed\n # data (non-auxiliary).\n self.processed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.processed_dim\n self.processed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # Set of all query group names, maintaining order in which they are first encountered when iterating through\n # the variables list. 
This is the simplest way to do this since dictionaries are guaranteed to be\n # insertion-ordered since Python 3.7\n self.group_names = list(dict.fromkeys([var.group_name for var in self._variables]))\n\n # List containing indices for each query group, where the query group names are assumed to be in the same order\n # as self.group_names\n self.group_idxs = [\n [idx for idx, var in enumerate(self._variables) if var.group_name == group_name]\n for group_name in self.group_names\n ]\n\n # Remove groups containing no queriable variables from self.group_names and self.group_idxs, as\n # we can guarantee that we will never query these groups.\n is_group_queriable = [any(self._variables[idx].query for idx in idxs) for idxs in self.group_idxs]\n\n self.group_names = [name for group_idx, name in enumerate(self.group_names) if is_group_queriable[group_idx]]\n self.group_idxs = [idxs for group_idx, idxs in enumerate(self.group_idxs) if is_group_queriable[group_idx]]\n\n # Save the list of observed column ids\n default_used_cols = list(range(len(self._variables) + len(auxiliary_variables))) # All columns observed\n self.used_cols = used_cols if used_cols is not None else default_used_cols\n assert len(self.used_cols) == len(self._variables) + len(self.auxiliary_variables)\n\n self.col_id_to_var_index = {old: new for new, old in enumerate(self.used_cols)}\n\n def __repr__(self):\n return str(self._variables)\n\n def __iter__(self) -> Iterator[Variable]:\n \"\"\"\n Iterate through the variables within the container.\n Note - Now it iterate through all the variables within the container\n (including auxiliary variables, if they are present)\n \"\"\"\n for var in self._all_variables:\n yield var\n\n def __getitem__(self, idx):\n return (self._all_variables)[idx]\n\n def __len__(self) -> int:\n return len(self._variables) + len(self.auxiliary_variables)\n\n @classmethod\n def create_from_json(cls, path: str) -> Variables:\n return cls.create_from_dict(read_json_as(path, dict))\n\n @classmethod\n def create_from_dict(cls, variables_dict: Dict[str, List[Any]]) -> Variables:\n \"\"\"\n Create variables object from a dictionary\n \"\"\"\n variables = variables_dict[\"variables\"]\n for var in variables:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n var_obj_list = [Variable(**var) for var in variables]\n\n auxiliary_vars = variables_dict.get(\"auxiliary_variables\", [])\n if len(auxiliary_vars) == 0:\n auxiliary_vars_obj = None\n else:\n for var in auxiliary_vars:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n\n auxiliary_vars_obj = [Variable(**var) for var in auxiliary_vars]\n\n used_cols = variables_dict.get(\"used_cols\", None)\n\n return cls(var_obj_list, auxiliary_vars_obj, used_cols)\n\n @classmethod\n def create_from_data_and_dict(\n cls, data: np.ndarray, mask: np.ndarray, variables_dict: Optional[Dict[str, Any]] = None\n ) -> Variables:\n \"\"\"\n Create variables object from an input dictionary, inferring missing fields using `data` and `mask`.\n \"\"\"\n # Infer missing fields in variables_dict\n variables_dict = cls.infer_from_data(data, mask, variables_dict, True)\n variables = cls.create_from_dict(variables_dict)\n return variables\n\n @staticmethod\n def _metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n ) -> Tuple[List[Any], Union[List[Any], None]]:\n \"\"\"\n Infer variables_metadata from input data\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for 
unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n variables_type: is it aux variables, or normal variables\n Returns:\n varaibles_metadata: inferred metadata from input data\n A list of column ids that were used when processing the original data.\n \"\"\"\n\n variables_metadata = []\n # Use None rather than {} as default since mutable default args are dangerous in Python.\n used_cols = variables_dict.get(\"used_cols\", None)\n if used_cols:\n used_cols = cast(List[int], used_cols)\n assert len(used_cols) == data.shape[1]\n\n for idx, variable_metadata in enumerate(variables_dict[variables_type]):\n if not all(\n k in variable_metadata for k in [\"name\", \"type\", \"lower\", \"upper\", \"query\", \"target\", \"always_observed\"]\n ):\n # If variable metadata fully specified, do not try to infer, as doing column indexing can be expensive\n # for CSR sparse matrices.\n var_data = data[:, idx]\n var_mask = mask[:, idx]\n if issparse(var_data):\n var_data = var_data.toarray()\n var_mask = var_mask.toarray()\n\n if \"name\" not in variable_metadata:\n if used_cols:\n variable_metadata[\"name\"] = str(used_cols[idx])\n else:\n variable_metadata[\"name\"] = f\"Column {idx}\"\n\n # If data type/min max/num categories specified explicitly, overwrite variables file\n if \"type\" not in variable_metadata:\n # Test if all unmasked elements are integers\n\n if np.all((var_data * var_mask) // 1 == var_data * var_mask):\n if (var_data * var_mask).max() <= 1:\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as binary. This can be '\n \"changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"binary\"\n else:\n # Note that we always infer integer values with a max value > 1 as categorical. This may want to be\n # reconsidered if support for ordinal variables is introduced at a later date.\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as categorical. This can be'\n \" changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"categorical\"\n else:\n variable_metadata[\"type\"] = \"continuous\"\n\n if \"lower\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_lower = 0\n else:\n inferred_lower = min(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"lower\"] = inferred_lower\n print(\n f'Minimum value of variable {variable_metadata[\"name\"]} inferred as {inferred_lower}. This'\n \" can be changed manually in the dataset's variables.json file\"\n )\n\n if \"upper\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_upper = 1\n else:\n inferred_upper = max(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"upper\"] = inferred_upper\n print(\n f'Max value of variable {variable_metadata[\"name\"]} inferred as {inferred_upper}. This can '\n \"be changed manually in the dataset's variables.json file\"\n )\n\n if \"query\" not in variable_metadata:\n # By default, assume all variables can be queried unless specified otherwise.\n if variables_type == \"auxiliary_variables\":\n variable_metadata[\"query\"] = False\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a non-queriable variable. 
'\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n else:\n variable_metadata[\"query\"] = True\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a queriable variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n\n if \"target\" not in variable_metadata:\n # By default, assume variable is a target if and only if it is not queriable.\n variable_metadata[\"target\"] = not variable_metadata[\"query\"]\n fill_string = \"not \" if not variable_metadata[\"target\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an active learning target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"target\" field.'\n )\n\n if \"always_observed\" not in variable_metadata:\n # By default, assume variable is always observed if there is no missing in the mask.\n if np.sum((var_mask - 1) ** 2) == 0:\n variable_metadata[\"always_observed\"] = True\n else:\n variable_metadata[\"always_observed\"] = False\n fill_string = \"not \" if not variable_metadata[\"always_observed\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an always observed target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"always_observed\" field.'\n )\n\n variables_metadata.append(variable_metadata)\n\n return variables_metadata, used_cols\n\n @staticmethod\n def infer_from_data(data, mask, variables_dict=None, infer_aux_variables=False) -> Dict[str, List[Any]]:\n \"\"\"\n Infer missing values in an input variables dictionary, using the input data.\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. 
Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n infer_aux_variables: infer auxiliary variables for GINA or not.\n Returns:\n variables_dict: Updated version of the input variables_dict, with missing variables and fields inferred from the\n data.\n \"\"\"\n\n if variables_dict is None:\n variables_dict = {}\n\n # NOTE this assumes all variables have only one column in unprocessed data, which should always be the case when\n # inferring from a dataset.\n if \"auxiliary_variables\" not in variables_dict:\n variables_dict[\"auxiliary_variables\"] = []\n\n if \"variables\" not in variables_dict or variables_dict[\"variables\"] == []:\n num_var_cols = data.shape[1] - len(variables_dict[\"auxiliary_variables\"])\n variables_dict[\"variables\"] = [{} for _ in range(num_var_cols)]\n\n variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": variables_dict[\"auxiliary_variables\"],\n \"used_cols\": used_cols,\n }\n if infer_aux_variables:\n aux_variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"auxiliary_variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": aux_variables_metadata,\n \"used_cols\": used_cols,\n }\n\n return variables_dict\n\n @property\n def _all_variables(self):\n return self._variables + self.auxiliary_variables\n\n @property\n def has_auxiliary(self) -> bool:\n \"\"\"\n True if there are aux variables present.\n \"\"\"\n return len(self.auxiliary_variables) > 0\n\n @property\n def binary_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all binary variables.\n \"\"\"\n return self.var_idxs_by_type[\"binary\"]\n\n @property\n def categorical_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all categorical variables.\n \"\"\"\n return self.var_idxs_by_type[\"categorical\"]\n\n @property\n def discrete_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all discrete (i.e. binary or categorical) variables. We sort to ensure that the\n combined list is in ascending order.\n \"\"\"\n return sorted(self.var_idxs_by_type[\"categorical\"] + self.var_idxs_by_type[\"binary\"])\n\n @property\n def continuous_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all continuous variables.\n \"\"\"\n return self.var_idxs_by_type[\"continuous\"]\n\n @property\n def text_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all text variables.\n \"\"\"\n return self.var_idxs_by_type[\"text\"]\n\n @property\n def non_text_idxs(self) -> List[bool]:\n \"\"\"Helper method. Returns list of booleans, where an element\n at index i indicates whether a variable at index i is non-text or not\n e.g. 
For Variables object of [...\"continous\"..., ...\"text\"..., \"continuous\"],\n the result would be [True, False, True]\n \"\"\"\n unproc_cols_by_type = self.unprocessed_cols_by_type\n if \"text\" not in unproc_cols_by_type:\n return [True for _ in range(len(self))]\n return (~np.in1d(range(len(self)), unproc_cols_by_type[\"text\"])).tolist()\n\n @property\n def num_unprocessed_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_cols)\n\n @property\n def num_unprocessed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_non_aux_cols)\n\n @property\n def num_processed_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_cols)\n\n @property\n def num_processed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_non_aux_cols)\n\n @property\n def num_groups(self) -> int:\n \"\"\"\n Return the number of unique query groups in the variables object.\n \"\"\"\n return len(self.group_names)\n\n @property\n def group_mask(self) -> np.ndarray:\n \"\"\"\n Return a mask of shape (num_groups, num_processed_cols) indicating which column\n corresponds to which group.\n \"\"\"\n mask = np.zeros((self.num_groups, self.num_processed_cols), dtype=bool)\n for group_idx, group in enumerate(self.group_idxs):\n for var in group:\n for proc_col in self.processed_cols[var]:\n mask[group_idx, proc_col] = 1\n return mask\n\n @property\n def proc_always_observed_list(self) -> List[Optional[bool]]:\n \"\"\"\n The mask that indicates if the variable is always observed (for processed data)\n \"\"\"\n return sum(([var.always_observed] * var.processed_dim for var in self._all_variables), [])\n\n @property\n def processed_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data associated with each variable of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._all_variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def processed_non_aux_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data (w/o aux variables) associated with each\n variable of that type.\n E.g. 
for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def unprocessed_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._all_variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n @property\n def unprocessed_non_aux_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n def subset(self, idxs: List[int], auxiliary_idxs: Optional[List[int]] = None) -> Variables:\n \"\"\"\n Returns a new Variables object containing only the Variable objects whose indices are given in `idxs`.\n Note that this currently ignores metadata variables.\n \"\"\"\n if auxiliary_idxs is None:\n auxiliary_idxs = []\n\n variables_list = [self._variables[idx] for idx in idxs]\n auxiliary_variables_list = [self.auxiliary_variables[idx] for idx in auxiliary_idxs]\n return Variables(variables_list, auxiliary_variables_list)\n\n def to_dict(self) -> Dict[str, Any]:\n variables_list = [var.to_json() for var in self._variables]\n if self.auxiliary_variables is None:\n auxiliary_vars_list = []\n else:\n auxiliary_vars_list = [var.to_json() for var in self.auxiliary_variables]\n\n variables_json = {\n \"variables\": variables_list,\n \"auxiliary_variables\": auxiliary_vars_list,\n \"used_cols\": [int(col) for col in self.used_cols],\n }\n return variables_json\n\n def save(self, path: str) -> None:\n variables_json = self.to_dict()\n save_json(variables_json, path)\n\n def as_list(self) -> List[Variable]:\n return self._variables\n\n def get_idxs_from_name_list(self, variable_names: List[Union[str, int]]) -> np.ndarray:\n \"\"\"\n Get a binary array of shape (variable_count,), where for each index the array value is 1 if the corresponding\n variable is named in `variable_names`, and 0 otherwise.\n \"\"\"\n variables_to_query = np.zeros((len(self._variables),))\n # Look up indices of specified variables and mark as queriable.\n for variable_name in variable_names:\n # Cast name to string in case numeric names (e.g. 
question ids) have been input as integers.\n variable_name = str(variable_name)\n variable_idx = self.name_to_idx[variable_name]\n variables_to_query[variable_idx] = 1\n\n return variables_to_query\n\n def get_observable_groups(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of indices for groups that are still observable in the current row\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n list of indices of groups that can be observed, where the indices correspond to the corresponding group\n names in `self.group_names`.\n \"\"\"\n observable_variables_idxs = self.get_observable_variable_idxs(data_mask_row, obs_mask_row)\n observable_groups_idxs: List[int] = []\n for group_idx, idxs in enumerate(self.group_idxs):\n if any(i in observable_variables_idxs for i in idxs):\n observable_groups_idxs.append(group_idx)\n return observable_groups_idxs\n\n def get_observable_variable_idxs(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of variable idxs for variables that are still observable in the current row.\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n observable_vars: List of indices of variables that can be observed.\n \"\"\"\n if data_mask_row.ndim != 1:\n raise ValueError(f\"Test mask should be 1D, had {data_mask_row.ndim} dims and shape {data_mask_row.shape}.\")\n if obs_mask_row.ndim != 1:\n raise ValueError(\n f\"Observation mask should be 1D, had {obs_mask_row.ndim} dims and shape {obs_mask_row.shape}.\"\n )\n if len(obs_mask_row) != len(data_mask_row) or len(data_mask_row) != len(self._variables):\n # One likely cause is accidentally passing 'processed' masks, which may be longer\n # if some variables are categorical.\n raise ValueError(\n f\"Lengths of obs_mask_row {len(obs_mask_row)}, data_mask_row {len(data_mask_row)}, \"\n f\"and variables list {len(self._variables)} should all be the same.\"\n )\n # Get ids where there is an underlying data value (test_mask == 1) and that we haven't yet queried (obs_mask == 0)\n unobserved_idxs = np.where((data_mask_row == 1) & (obs_mask_row == 0))[0]\n\n # Intersection of these and query_var_idxs.\n observable_idx_set = set(unobserved_idxs).intersection(set(self.query_var_idxs))\n return list(observable_idx_set)\n\n def get_var_cols_from_data(self, var_idx, data):\n \"\"\"\n Get data from an array for a single variable only.\n\n Args:\n var_idx: Index of variable we want data for.\n data (shape (batch_size, variable_count)): Array to get variable info from.\n\n Returns:\n var_data (shape (observed_count, processed_dim)): Values only for\n the corresponding variable.\n \"\"\"\n return data[:, self.processed_cols[var_idx]]\n\n def get_variables_to_observe(self, data_mask: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Return a boolean tensor of length num_variables, where each element indicates whether the corresponding variable\n can be queried during active learning (i.e. 
the variable is queriable and has at least one observed value in\n the data).\n Args:\n data_mask (shape (batch_size, num_processed_cols)): Processed mask\n\n Returns:\n torch.Tensor (shape (variable_count,)): True where it's a query-able variable and we have at least one\n observed value\n \"\"\"\n cols_with_data = data_mask.sum(dim=0).to(torch.bool)\n\n # data_mask may have multiple columns for a single variable, if it's a categorical variable. Pick first entry per variable\n ii = torch.tensor([cols[0] for cols in self.processed_cols], dtype=torch.long, device=cols_with_data.device)\n cols_with_data = torch.index_select(cols_with_data, 0, ii)\n is_query_id = torch.zeros(len(self), dtype=torch.bool, device=cols_with_data.device)\n is_query_id[\n tuple(self.query_var_idxs),\n ] = True\n return is_query_id * cols_with_data\n\n def _deduplicate_names(self):\n # Produce warning if var name is reused and add an increasing integer to the end until it is unique.\n var_names = set()\n for var in self._all_variables:\n i = 2\n original_name = var.name\n while var.name in var_names:\n new_name = f\"{original_name}_{i}\"\n var.name = new_name\n i += 1\n if var.name != original_name:\n # Do the warning in a separate block to the while loop so that we only raise one warning if we have to\n # try appending several different integers to the name.\n warnings.warn(\n f\"Name {original_name} has already been used, renaming to {var.name}\",\n UserWarning,\n )\n var_names.add(var.name)\n\n # TODO: Maybe create Variables.Utils for methods like the below one\n @staticmethod\n def create_empty_data(variables: Variables) -> np.ndarray:\n var_count = len(variables)\n empty_data = np.zeros((1, var_count), dtype=object)\n for i in range(var_count):\n if variables[i].type_ == \"text\":\n empty_data[:, i] = \"empty str\"\n return empty_data" }, { "identifier": "DataProcessor", "path": "src/causica/preprocessing/data_processor.py", "snippet": "class DataProcessor:\n def __init__(\n self,\n variables: Variables,\n unit_scale_continuous: bool = True,\n standardize_data_mean: bool = False,\n standardize_data_std: bool = False,\n ):\n \"\"\"\n Args:\n variables (Variables): Information about variables/features used\n by this model.\n unit_scale_continuous (bool): Scale continuous variables to the range of [0, 1].\n standardize_data_mean (bool): Standardize continuous variables to mean=0\n standardize_data_std (bool): Standardize continuous variables to std=1\n \"\"\"\n if unit_scale_continuous and (standardize_data_mean or standardize_data_std):\n raise ValueError(\"Cannot unit scale and standardize variables simultanously.\")\n self._variables = variables\n\n # Call unprocessed columns unproc_cols, processed columns proc_cols\n unproc_cols_by_type = self._variables.unprocessed_cols_by_type\n proc_cols_by_type = self._variables.processed_cols_by_type\n\n def flatten(lists):\n # Flatten proc_cols for continuous and binary unproc_cols, since they will be of form [[1], [2], ...]\n return [i for sublist in lists for i in sublist]\n\n if \"binary\" in unproc_cols_by_type:\n self._bin_unproc_cols = unproc_cols_by_type[\"binary\"]\n self._bin_proc_cols = flatten(proc_cols_by_type[\"binary\"])\n\n # Save contiguous regions containig binary features to allow for more efficient processing via slicing\n self._bin_unproc_regions = self.split_contiguous_sublists(self._bin_unproc_cols)\n self._bin_proc_regions = self.split_contiguous_sublists(self._bin_proc_cols)\n assert len(self._bin_unproc_regions) == len(self._bin_proc_regions)\n 
else:\n self._bin_unproc_cols, self._bin_proc_cols = [], []\n\n if \"continuous\" in unproc_cols_by_type:\n self._cts_unproc_cols = unproc_cols_by_type[\"continuous\"]\n self._cts_proc_cols = flatten(proc_cols_by_type[\"continuous\"])\n\n # Save contiguous regions containing continuous features to allow for more efficient processing via slicing\n if all(x.overwrite_processed_dim is None for x in self._variables):\n self._cts_unproc_regions = self.split_contiguous_sublists(self._cts_unproc_cols)\n self._cts_proc_regions = self.split_contiguous_sublists(self._cts_proc_cols)\n else:\n # For VAEM, we can only take single variable as region\n # to allow for processing/reverting mask\n self._cts_unproc_regions = [[col_id] for col_id in unproc_cols_by_type[\"continuous\"]]\n self._cts_proc_regions = proc_cols_by_type[\"continuous\"]\n assert len(self._cts_unproc_regions) == len(self._cts_proc_regions)\n if unit_scale_continuous:\n self._cts_normalizers = [\n UnitScaler(variables[i] for i in unproc_region) for unproc_region in self._cts_unproc_regions\n ]\n elif standardize_data_mean or standardize_data_std:\n self._cts_normalizers = [\n StandardScaler(with_mean=standardize_data_mean, with_std=standardize_data_std)\n for _ in self._cts_unproc_regions\n ]\n else:\n self._cts_normalizers = [IdentityTransform()] * len(self._cts_unproc_regions)\n else:\n self._cts_unproc_cols, self._cts_proc_cols, self._cts_normalizers = [], [], []\n\n if \"categorical\" in unproc_cols_by_type:\n self._cat_unproc_cols = unproc_cols_by_type[\"categorical\"]\n self._cat_proc_cols = flatten(proc_cols_by_type[\"categorical\"])\n self._cat_proc_cols_grouped = proc_cols_by_type[\"categorical\"]\n\n def get_lower(idx):\n return self._variables[idx].lower\n\n def get_upper(idx):\n return self._variables[idx].upper\n\n var_categories = [\n np.arange(int(get_lower(var_idx)), int(get_upper(var_idx)) + 1) for var_idx in self._cat_unproc_cols\n ]\n self._one_hot_encoder = OneHotEncoder(categories=var_categories, sparse=False, handle_unknown=\"ignore\")\n # Fit on dummy data due to an issue in sklearn where the encoder needs to be fitted to data even if the\n # categories are specified upon creation.\n self._one_hot_encoder.fit(np.array([categories[0] for categories in var_categories]).reshape(1, -1))\n else:\n self._cat_unproc_cols, self._cat_proc_cols = [], []\n\n \n self._txt_unproc_cols, self._txt_proc_cols = [], []\n\n self._num_processed_cols = sum(var.processed_dim for var in self._variables)\n\n def process_data_and_masks(\n self,\n data: csr_matrix,\n data_mask: csr_matrix,\n *extra_masks: csr_matrix,\n batch_size: int = 1000,\n ) -> Tuple[csr_matrix, ...]:\n \"\"\"\n Process and validate data, data mask and optionally any number of additional masks. These masks will all be applied\n to the data when performing data range validation, in case of e.g. dummy zero data that is masked out by an\n additional obs_mask.\n\n Args:\n data: Unprocessed data array\n data_mask: Data indicating which values in `data` are observed. Can be any dtype provided all values are\n either 0 or 1.\n extra_masks: Additional masks to be processed, if any. 
Can be any dtype provided all values are either 0 or\n 1.\n batch_size: Batch size used during data preprocessing for sparse matrices.\n Returns:\n processed_data: Data with categorical variables expanded to a one-hot encoding, and features normalised.\n processed_data_mask: Boolean mask with categorical variables expanded to a one-hot encoding.\n processed_extra_masks: Any additional boolean masks with categorical variables expanded to a one-hot\n encoding.\n \"\"\"\n if not issparse(data):\n (\n proc_data,\n proc_data_mask,\n *proc_extra_masks,\n ) = self._process_and_check_dense(data, data_mask, *extra_masks)\n else:\n # Break sparse data into smaller batches and preprocess each as a dense array. Somewhat inefficient but\n # allows us to reuse our preprocessing functions and keeps memory usage manageable.\n proc_data_list: List[csr_matrix] = []\n proc_data_mask_list: List[csr_matrix] = []\n proc_extra_masks_lists: Tuple[List[csr_matrix], ...] = tuple([] for mask in extra_masks)\n num_rows = data.shape[0]\n for start_idx in tqdm(range(0, num_rows, batch_size), desc=\"Data preprocessing\"):\n stop_idx = min(start_idx + batch_size, num_rows)\n data_batch = data[start_idx:stop_idx].toarray()\n data_mask_batch = data_mask[start_idx:stop_idx].toarray()\n extra_masks_batch = tuple(mask[start_idx:stop_idx].toarray() for mask in extra_masks)\n\n # TODO: we will currently lose sparsity for rescaled continuous data here, since 0 will be mapped to\n # another value. We could multiply by the mask to zero out unobserved data but we need to make sure this\n # doesn't have any unintended consequences for cases with more complex masking, e.g. active learning\n (\n proc_data_batch,\n proc_data_mask_batch,\n *proc_extra_masks_batch,\n ) = self._process_and_check_dense(data_batch, data_mask_batch, *extra_masks_batch)\n proc_data_list.append(csr_matrix(proc_data_batch))\n proc_data_mask_list.append(csr_matrix(proc_data_mask_batch))\n for mask_list, mask in zip(proc_extra_masks_lists, proc_extra_masks_batch):\n mask_list.append(csr_matrix(mask))\n\n proc_data = sparse.vstack(proc_data_list, format=\"csr\")\n proc_data_mask = sparse.vstack(proc_data_mask_list, format=\"csr\")\n proc_extra_masks = tuple(\n sparse.vstack(proc_mask_list, format=\"csr\") for proc_mask_list in proc_extra_masks_lists\n )\n\n return (proc_data, proc_data_mask, *proc_extra_masks)\n\n def _process_and_check_dense(self, data: np.ndarray, data_mask: np.ndarray, *extra_masks: np.ndarray):\n \"\"\"\n Check validity of dense data and masks and process them.\n \"\"\"\n combined_mask = data_mask\n for mask in extra_masks:\n combined_mask = combined_mask * mask\n self.check_data(data, combined_mask)\n self.check_mask(data_mask)\n for mask in extra_masks:\n self.check_mask(mask)\n proc_data = self.process_data(data)\n proc_data_mask = self.process_mask(data_mask)\n proc_extra_masks = tuple(self.process_mask(mask) for mask in extra_masks)\n return (proc_data, proc_data_mask, *proc_extra_masks)\n\n def process_intervention_data(\n self, intervention_data: Union[InterventionData, Iterable[InterventionData]]\n ) -> List[InterventionData]:\n \"\"\"Preprocesses data in the InterventionData format and returns a list of processed InterventionData objects.\n\n\n\n Args:\n intervention_data (Union[InterventionData, Iterable[InterventionData]]): InterventionData object or list of\n InterventionData objects to be processed.\n\n Returns:\n List[InterventionData]: List of processed InterventionData objects.\n \"\"\"\n if isinstance(intervention_data, 
InterventionData):\n intervention_data = [intervention_data]\n\n proc_intervention = [\n InterventionData(\n i.intervention_idxs,\n self.process_data_subset_by_group(i.intervention_values, i.intervention_idxs),\n self.process_data(i.test_data),\n i.conditioning_idxs,\n self.process_data_subset_by_group(i.conditioning_values, i.conditioning_idxs),\n i.effect_idxs,\n self.process_data_subset_by_group(i.intervention_reference, i.intervention_idxs),\n self.process_data(i.reference_data) if i.reference_data is not None else None,\n )\n for i in intervention_data\n ]\n\n return proc_intervention\n\n def process_dataset(\n self, dataset: Union[Dataset, CausalDataset, SparseDataset]\n ) -> Union[Dataset, CausalDataset, SparseDataset]:\n train_data, train_mask = self.process_data_and_masks(*dataset.train_data_and_mask)\n val_data, _ = dataset.val_data_and_mask\n if val_data is not None:\n val_data, val_mask = self.process_data_and_masks(*dataset.val_data_and_mask)\n else:\n val_data, val_mask = None, None\n test_data, _ = dataset.test_data_and_mask\n if test_data is not None:\n test_data, test_mask = self.process_data_and_masks(*dataset.test_data_and_mask)\n else:\n test_data, test_mask = None, None\n\n if isinstance(dataset, CausalDataset):\n if dataset._intervention_data is not None:\n proc_intervention = self.process_intervention_data(dataset._intervention_data)\n else:\n proc_intervention = None\n\n # process counterfactual data\n if dataset._counterfactual_data is not None:\n proc_counterfactual = self.process_intervention_data(dataset._counterfactual_data)\n else:\n proc_counterfactual = None\n return CausalDataset(\n train_data,\n train_mask,\n dataset._adjacency_data,\n dataset._subgraph_data,\n proc_intervention,\n proc_counterfactual,\n val_data,\n val_mask,\n test_data,\n test_mask,\n variables=dataset.variables,\n data_split=dataset.data_split,\n true_posterior=dataset.true_posterior,\n graph_args=dataset.graph_args,\n )\n elif isinstance(dataset, (SparseDataset, Dataset)):\n return type(dataset)(\n train_data,\n train_mask,\n val_data,\n val_mask,\n test_data,\n test_mask,\n variables=dataset.variables,\n )\n else:\n raise TypeError(f\"Unsupported dataset type: {type(dataset)}\")\n\n def check_mask(self, mask: np.ndarray) -> None:\n \"\"\"\n Check mask contains 1 and 0 only\n \"\"\"\n if len(mask.shape) != 2 or mask.shape[1] != len(self._variables):\n raise ValueError(\n \"Mask must be 2D with shape (row_count, feature_count + aux_count).\"\n f\"Mask has shape {mask.shape} and feature_count is {len(self._variables)}.\"\n )\n bool_mask = mask.astype(bool)\n\n if not np.array_equal(mask, bool_mask):\n raise ValueError(\"Mask must contain 1 and 0 only.\")\n\n def check_data(self, data: np.ndarray, mask: np.ndarray) -> None:\n \"\"\"\n Check that each column of the data is valid with respect to the given variable definition.\n Raise an error if a discrete variable (binary or categorical) is not an integer or not within the specified range.\n Make a warning if a continuous variable is not within the specified range.\n Note that only observed values are checked.\n\n Args:\n variables: Variables object for data\n data: Unprocessed data array with shape (num_rows, num_features)\n mask: Mask indicting observed variables with shape (num_rows, num_features). 
1 is observed, 0 is un-observed.\n \"\"\"\n lower = np.array([var.lower for var in self._variables])\n upper = np.array([var.upper for var in self._variables])\n\n # Continuous variables\n cts_idxs = self._variables.continuous_idxs\n if len(cts_idxs) > 0:\n self.check_continuous_data(\n data=data[:, cts_idxs],\n mask=mask[:, cts_idxs],\n lower=lower[cts_idxs],\n upper=upper[cts_idxs],\n epsilon=EPSILON,\n )\n\n # Discrete variables\n disc_idxs = self._variables.discrete_idxs\n if len(disc_idxs) > 0:\n self.check_discrete_data(\n data=data[:, disc_idxs],\n mask=mask[:, disc_idxs],\n lower=lower[disc_idxs],\n upper=upper[disc_idxs],\n epsilon=EPSILON,\n )\n\n def check_continuous_data(\n self,\n data: np.ndarray,\n mask: np.ndarray,\n lower: np.ndarray,\n upper: np.ndarray,\n epsilon: float,\n ) -> None:\n \"\"\"\n Check if values in each column of the given continuous data are in the specified range. Make a warning\n if there is at least one value outside of the specified range. Note that only observed values are checked.\n\n Args:\n data: Unprocessed data array with shape (num_rows, num_features)\n mask: Mask indicting observed variables with shape (num_rows, num_features). 1 is observed, 0 is un-observed.\n lower: Array of column lower bounds with shape (num_features,)\n upper: Array of column upper bounds with shape (num_features,)\n epsilon: How close to the specified range we require values to be\n \"\"\"\n # type annotation to avoid mypy error\n lower_diff: np.ndarray = data - lower\n higher_diff: np.ndarray = data - upper\n too_low_cols = np.any(lower_diff * mask < -1 * epsilon, axis=0)\n too_high_cols = np.any(higher_diff * mask > epsilon, axis=0)\n\n too_low = np.any(too_low_cols)\n too_high = np.any(too_high_cols)\n\n if too_low:\n warnings.warn(\n f\"Data too low for continous variables {np.where(too_low_cols)[0]}\",\n UserWarning,\n )\n if too_high:\n warnings.warn(\n f\"Data too high for continous variables {np.where(too_high_cols)[0]}\",\n UserWarning,\n )\n\n def check_discrete_data(\n self,\n data: np.ndarray,\n mask: np.ndarray,\n lower: np.ndarray,\n upper: np.ndarray,\n epsilon: float,\n ) -> None:\n \"\"\"\n Check if values in each column of the given discrete (binary and categorical) data are in the specified range.\n Raise an error if there is at least one value outside of the specified range.\n Additionally, assert that all the given values are integers. Note that only observed values are checked.\n\n Args:\n data: Unprocessed data array with shape (num_rows, num_features)\n mask: Mask indicting observed variables with shape (num_rows, num_features). 
1 is observed, 0 is un-observed.\n lower: Array of column lower bounds with shape (num_features,)\n upper: Array of column upper bounds with shape (num_features,)\n epsilon: How close to the specified range we require values to be\n \"\"\"\n lower_diff: np.ndarray = data - lower\n higher_diff: np.ndarray = data - upper\n too_low_cols = np.any(lower_diff * mask < -1 * epsilon, axis=0)\n too_high_cols = np.any(higher_diff * mask > epsilon, axis=0)\n\n too_low = np.any(too_low_cols)\n too_high = np.any(too_high_cols)\n\n if too_low and too_high:\n raise ValueError(\n f\"Data too low for discrete variables {np.where(too_low_cols)[0]} \\n\"\n f\"Data too high for discrete variables {np.where(too_high_cols)[0]}\"\n )\n if too_low:\n raise ValueError(f\"Data too low for discrete variables {np.where(too_low_cols)[0]}\")\n if too_high:\n raise ValueError(f\"Data too high for discrete variables {np.where(too_high_cols)[0]}\")\n\n # Check all unmasked values are integer-valued.\n observed_data: np.ndarray = data * mask\n is_integer = np.floor_divide(observed_data, 1) == observed_data\n assert np.all(is_integer)\n\n def process_data(self, data: np.ndarray) -> np.ndarray:\n \"\"\"\n Returns the processed data and fits the normalizers the first time executed.\n\n Args:\n data: Array of shape (num_rows, feature_count + aux_count)\n or (num_timeseries, num_timesteps, feature_count + aux_count). If it's temporal data\n the data will be flattened (num_rows, feature_count + aux_count) and the columns\n will be processed irrespective of the timeseries.\n Returns:\n processed_data: Array of shape (num_rows, num_processed_cols) or (num_timeseries, num_timesteps, num_processed_cols)\n \"\"\"\n\n is_temporal = len(data.shape) == 3\n\n if is_temporal:\n orig_shape = data.shape\n data = data.reshape((np.prod(orig_shape[:2]), -1))\n\n num_rows, _ = data.shape\n\n # If all features are binary, no processing required so short-circuit here\n if len(self._cts_unproc_cols) == 0 and len(self._cat_unproc_cols) == 0:\n return data.astype(float)\n\n processed_data = np.full((num_rows, self._num_processed_cols), fill_value=np.nan)\n\n # Iterate through each contiguous subarray of features of each type. Can guarantee that these regions will line\n # up between processed and unprocessed arrays since we don't change the feature order. 
We do this since\n # accessing/writing slices is much more efficient in NumPy than fancy indexing.\n # TODO: if we can sort/unsort features by type during processing without breaking anything, then we can simply\n # do one slice of the array per feature type and not need all this extra complexity.\n\n if self._bin_unproc_cols:\n for unproc_region, proc_region in zip(self._bin_unproc_regions, self._bin_proc_regions):\n unproc_start, unproc_end = unproc_region[0], unproc_region[-1] + 1\n proc_start, proc_end = proc_region[0], proc_region[-1] + 1\n processed_data[:, proc_start:proc_end] = data[:, unproc_start:unproc_end].astype(float)\n\n if self._cts_unproc_cols:\n for unproc_region, proc_region, normalizer in zip(\n self._cts_unproc_regions, self._cts_proc_regions, self._cts_normalizers\n ):\n unproc_start, unproc_end = unproc_region[0], unproc_region[-1] + 1\n proc_start, proc_end = proc_region[0], proc_region[-1] + 1\n cts_unproc_data = data[:, unproc_start:unproc_end].astype(float)\n\n # Make sure the transform has been fitted\n try:\n check_is_fitted(normalizer)\n except NotFittedError:\n normalizer.fit(cts_unproc_data)\n\n processed_data[:, proc_start:proc_end] = normalizer.transform(cts_unproc_data)\n\n if self._cat_unproc_cols:\n # Don't currently split into separate contiguous subarrays for categorical vars since we only want a single\n # one-hot encoder for simplicity.\n cat_unproc_data = data[:, self._cat_unproc_cols].astype(float)\n processed_data[:, self._cat_proc_cols] = self._one_hot_encoder.transform(cat_unproc_data)\n\n if self._txt_unproc_cols:\n processed_data[:, self._txt_proc_cols] = self._text_embedder.encode(data[:, self._txt_unproc_cols])\n\n if is_temporal:\n processed_data = processed_data.reshape(list(orig_shape[:-1]) + [-1])\n\n return processed_data\n\n def process_data_subset_by_group(\n self, data: Optional[np.ndarray], idxs: Optional[np.ndarray]\n ) -> Optional[np.ndarray]:\n \"\"\"\n Args:\n data: Array of shape (num_rows, num_unprocessed_cols_subset) or (num_unprocessed_cols_subset)\n or (num_timeseries, num_rows, num_unprocessed_cols_subset).\n Data should be ordered by group, and then by variables within that group in the same\n order as the main dataset. If the data is temporal it will be flattened to\n (num_rows, num_unprocessed_cols_subset) and the columnswill be processed irrespective of the timeseries.\n idxs: Array indicating the ordered indices of the groups represented in the data.\n Returns:\n processed_data: Array of shape (num_rows, num_processed_cols_subset) or (num_processed_cols_subset)\n or (num_timeseries, num_rows, num_processed_cols_subset)\n \"\"\"\n # Add statement idxs is None, to avoid mypy error: None type has no __iter__. 
I assume if data is None or idxs is None, just return None.\n if data is None or idxs is None: # Helpful when calling from `process_dataset`\n return None\n\n is_temporal = len(data.shape) == 3\n\n # For temporal data, remove time index from idxs.\n if idxs.ndim > 1:\n idxs = idxs[..., 0]\n\n if is_temporal:\n orig_shape = data.shape\n data = data.reshape((np.prod(orig_shape[:2]), -1))\n\n if len(data.shape) == 0:\n data = np.array([data.item()])\n num_rows = 1\n elif len(data.shape) == 1:\n num_rows = 1\n else:\n num_rows, _ = data.shape\n pseudodata = np.zeros((num_rows, self._variables.num_processed_cols))\n\n start = 0\n for i in idxs:\n for j in self._variables.group_idxs[i]:\n unproc_dim = self._variables[j].unprocessed_dim\n pseudodata[:, self._variables.unprocessed_cols[j]] = data[..., start : (start + unproc_dim)]\n start += unproc_dim\n\n processed_pseudodata = self.process_data(pseudodata)\n\n output_num_cols = self._variables.group_mask[idxs, :].sum()\n return_data = np.full((num_rows, output_num_cols), fill_value=np.nan)\n\n start = 0\n for i in idxs:\n for j in self._variables.group_idxs[i]:\n proc_dim = self._variables[j].processed_dim\n return_data[:, start : (start + proc_dim)] = processed_pseudodata[:, self._variables.processed_cols[j]]\n start += proc_dim\n\n if len(data.shape) == 1:\n return_data = return_data.squeeze(0)\n\n if is_temporal:\n return_data = return_data.reshape(list(orig_shape[:-1]) + [-1])\n\n return return_data\n\n def process_mask(self, mask: V) -> V:\n \"\"\"\n Args:\n mask: Array/Tensor of shape (num_rows, feature_count + aux_count) taking values 0 or 1\n Returns:\n processed_mask: Boolean array of shape (num_rows, num_processed_cols)\n \"\"\"\n num_rows, _ = mask.shape\n\n if isinstance(mask, np.ndarray): # If numpy array opperate on bools\n processed_mask = np.zeros((num_rows, self._num_processed_cols), dtype=bool)\n elif isinstance(mask, torch.Tensor): # If torch tensors operate on floats\n processed_mask = torch.zeros(\n (num_rows, self._num_processed_cols),\n dtype=mask.dtype,\n device=mask.device,\n )\n else:\n raise ValueError(\"Wrong type of mask object\")\n\n if self._bin_unproc_cols:\n for unproc_region, proc_region in zip(self._bin_unproc_regions, self._bin_proc_regions):\n unproc_start, unproc_end = unproc_region[0], unproc_region[-1] + 1\n proc_start, proc_end = proc_region[0], proc_region[-1] + 1\n processed_mask[:, proc_start:proc_end] = mask[:, unproc_start:unproc_end]\n\n if self._cts_unproc_cols:\n for unproc_region, proc_region in zip(self._cts_unproc_regions, self._cts_proc_regions):\n unproc_start, unproc_end = unproc_region[0], unproc_region[-1] + 1\n proc_start, proc_end = proc_region[0], proc_region[-1] + 1\n processed_mask[:, proc_start:proc_end] = mask[:, unproc_start:unproc_end]\n\n if self._cat_unproc_cols:\n for var, proc_cols in zip(self._cat_unproc_cols, self._cat_proc_cols_grouped):\n # Index with var:var+1 to return 2D array rather than 1D to allow broadcasting.\n processed_mask[:, proc_cols] = mask[:, var : var + 1]\n\n if self._txt_unproc_cols:\n for var, proc_cols in zip(self._txt_unproc_cols, self._txt_proc_cols_grouped):\n # Index with var:var+1 to return 2D array rather than 1D to allow broadcasting.\n processed_mask[:, proc_cols] = mask[:, var : var + 1]\n\n return processed_mask\n\n def revert_mask(self, mask: V) -> V:\n \"\"\"\n Revert processed mask into unprocessed form (i.e. 
squash categorical/text var indices).\n\n Args:\n variables:\n mask: Numpy array/Torch tensor with shape (num_rows, input_count)\n\n Returns:\n data: Numpy array/Torch tensor with shape (num_rows, feature_count + aux_count)\n \"\"\"\n proc_cols_to_delete = []\n for idx, var in enumerate(self._variables):\n if var.type_ not in {\"categorical\", \"text\"} and var.overwrite_processed_dim is not None:\n continue\n cols = self._variables.processed_cols[idx]\n # Delete all columns except for first one\n proc_cols_to_delete += cols[1:]\n proc_cols_to_stay = [col for col in range(mask.shape[1]) if col not in proc_cols_to_delete]\n return mask[:, proc_cols_to_stay]\n\n def revert_data(self, data: np.ndarray) -> np.ndarray:\n \"\"\"\n Undo processing to return output in the same form as the input. Sort-of-inverse of process_data.\n This involves reversing the squash operation for continuous variables, changing one-hot\n categorical variables into a single natural number and reordering data.\n\n Args:\n data: Numpy array with shape (num_rows, input_count)\n\n Returns:\n data: Numpy array with shape (num_rows, feature_count + aux_count)\n \"\"\"\n # revert_data() is only called on imputed data, which is inherently dense, so we assume a sparse matrix is never\n # passed into this method.\n\n num_rows, _ = data.shape\n\n unprocessed_data = np.empty((num_rows, self._variables.num_unprocessed_cols), dtype=object)\n\n if self._bin_unproc_cols:\n for unproc_region, proc_region in zip(self._bin_unproc_regions, self._bin_proc_regions):\n unproc_start, unproc_end = unproc_region[0], unproc_region[-1] + 1\n proc_start, proc_end = proc_region[0], proc_region[-1] + 1\n unprocessed_data[:, unproc_start:unproc_end] = data[:, proc_start:proc_end]\n\n if self._cts_unproc_cols:\n for unproc_region, proc_region, normalizer in zip(\n self._cts_unproc_regions, self._cts_proc_regions, self._cts_normalizers\n ):\n unproc_start, unproc_end = unproc_region[0], unproc_region[-1] + 1\n proc_start, proc_end = proc_region[0], proc_region[-1] + 1\n unprocessed_data[:, unproc_start:unproc_end] = normalizer.inverse_transform(\n data[:, proc_start:proc_end]\n )\n\n if self._cat_unproc_cols:\n unprocessed_data[:, self._cat_unproc_cols] = self._one_hot_encoder.inverse_transform(\n data[:, self._cat_proc_cols]\n )\n\n if self._txt_unproc_cols:\n unprocessed_data[:, self._txt_unproc_cols] = self._text_embedder.decode(data[:, self._txt_proc_cols])\n\n return unprocessed_data\n\n @staticmethod\n def split_contiguous_sublists(ints: List[int]) -> List[List[int]]:\n \"\"\"\n Map from list of ints to list of contiguous sublists. E.g. [1,2,4,6,7] -> [[1,2],[4],[6,7]]. Assumes input list\n is sorted.\n \"\"\"\n out: List[List[int]] = []\n for i in ints:\n if len(out) == 0:\n out.append([i])\n elif i == out[-1][-1] + 1:\n out[-1].append(i)\n else:\n out.append([i])\n return out" } ]
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Optional, Tuple, Union

from ..datasets.dataset import Dataset
from ..datasets.variables import Variables
from ..preprocessing.data_processor import DataProcessor

import numpy as np
import torch
15,441
# This is required in python 3 to allow return types of the same class.
from __future__ import annotations


class IModel(ABC):
    """
    Interface for model:
    create: Create an instance of the concrete class.
    load: Load an instance of the concrete class from a given directory.
    save: Save any data needed to load the model.
    name: Name of objective, to use when finding model to use from string.
    run_train: Train the model.
    impute: Impute missing values:
    """
# This is required in python 3 to allow return types of the same class.
from __future__ import annotations


class IModel(ABC):
    """
    Interface for model:
    create: Create an instance of the concrete class.
    load: Load an instance of the concrete class from a given directory.
    save: Save any data needed to load the model.
    name: Name of objective, to use when finding model to use from string.
    run_train: Train the model.
    impute: Impute missing values:
    """
def __init__(self, model_id: str, variables: Variables, save_dir: str) -> None:
1
2023-11-21 12:55:08+00:00
24k
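The record above closes with its target line and a few scalar metadata values. Purely as an illustration, the sketch below shows one way such a row could be reassembled into a next-line completion prompt; the build_prompt helper, the use of the schema field names as dictionary keys, and the reading of gold_snippet_index are assumptions for this example, not part of the dataset's own tooling.

from typing import Any, Dict, List


def build_prompt(row: Dict[str, Any]) -> str:
    """Hypothetical helper: stitch retrieved snippets and the in-file prefix
    into one prompt whose expected continuation is the row's next_line."""
    # Retrieved cross-file context: a list of {"identifier", "path", "snippet"} dicts.
    context_blocks: List[str] = [
        f"# {c['path']} :: {c['identifier']}\n{c['snippet']}" for c in row["context"]
    ]
    # In-file prefix: the import block followed by the cropped code window.
    prefix = row["import_statement"] + "\n" + row["cropped_code"]
    return "\n\n".join(context_blocks + [prefix])

# Usage sketch: with `row` parsed from one record like the one above, a model's
# continuation of build_prompt(row) would be compared against row["next_line"];
# gold_snippet_index plausibly marks which context entry holds the ground-truth
# definition, but that interpretation is an assumption here.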
Yifei-Y/Openset-RCNN
openset_rcnn/evaluation/os_coco_evaluation.py
[ { "identifier": "GRASPNET_KNOWN_IDS", "path": "openset_rcnn/data/graspnet_meta.py", "snippet": "GRASPNET_KNOWN_IDS = [graspnet_known_name_id_dic[name_cat] for name_cat in GRASPNET_KNOWN_CATEGORIES]" }, { "identifier": "GRASPNET_KNOWN_CATEGORIES", "path": "openset_rcnn/data/graspnet_meta.py", "snippet": "GRASPNET_KNOWN_CATEGORIES = [\n \"cracker_box\", \"tomato_soup_can\", \"banana\", \"mug\", \"power_drill\", \"scissors\", \"strawberry\",\n \"peach\", \"plum\", \"knife\", \"flat_screwdriver\", \"racquetball\", \"b_cups\", \"d_toy_airplane\",\n \"f_toy_airplane\", \"i_toy_airplane\", \"j_toy_airplane\", \"dabao_sod\", \"darlie_toothpaste\",\n \"camel\", \"large_elephant\", \"rhinocero\", \"darlie_box\", \"black_mouse\", \"dabao_facewash\",\n \"pantene\", \"head_shoulders_supreme\", \"head_shoulders_care\"\n]" }, { "identifier": "OpensetCOCOEval", "path": "openset_rcnn/evaluation/os_cocoeval.py", "snippet": "class OpensetCOCOEval(COCOeval):\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n p = self.params\n k_gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n unk_gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=1000))\n k_dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n unk_dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=1000))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(k_gts, self.cocoGt)\n _toMask(unk_gts, self.cocoGt)\n _toMask(k_dts, self.cocoDt)\n _toMask(unk_dts, self.cocoDt)\n # set ignore flag\n for kgt in k_gts:\n kgt['ignore'] = kgt['ignore'] if 'ignore' in kgt else 0\n kgt['ignore'] = 'iscrowd' in kgt and kgt['iscrowd']\n for ugt in unk_gts:\n ugt['ignore'] = ugt['ignore'] if 'ignore' in ugt else 0\n ugt['ignore'] = 'iscrowd' in ugt and ugt['iscrowd']\n self._k_gts = defaultdict(list) # gt for evaluation\n self._ok_gts = defaultdict(list)\n self._unk_gts = defaultdict(list)\n self._k_dts = defaultdict(list) # dt for evaluation\n self._unk_dts = defaultdict(list)\n for kgt in k_gts:\n self._k_gts[kgt['image_id'], kgt['category_id']].append(kgt)\n for cId in p.catIds:\n for kgt in k_gts:\n if kgt['category_id'] != cId:\n self._ok_gts[kgt['image_id'], cId].append(kgt)\n for ugt in unk_gts:\n self._unk_gts[ugt['image_id']].append(ugt)\n for kdt in k_dts:\n self._k_dts[kdt['image_id'], kdt['category_id']].append(kdt)\n for udt in unk_dts:\n self._unk_dts[udt['image_id']].append(udt)\n self.evalImgs_kdt = defaultdict(list) # per-image per-category evaluation results\n self.evalImgs_unkdt = defaultdict(list)\n self.eval_kdt = {} # accumulated evaluation results\n self.eval_unkdt = {}\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n self.ious_kdt_kgt = {(imgId, catId): self.computeIoU_kdt_kgt(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n self.ious_kdt_okgt = {(imgId, catId): self.computeIoU_kdt_okgt(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n self.ious_kdt_unkgt = {(imgId, catId): self.computeIoU_kdt_unkgt(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n self.ious_unkdt_kgt = {(imgId): self.computeIoU_unkdt_kgt(imgId) for imgId in p.imgIds}\n self.ious_unkdt_unkgt = {(imgId): self.computeIoU_unkdt_unkgt(imgId) for imgId in p.imgIds}\n \n maxDet = p.maxDets[-1]\n self.evalImgs_kdt = [self.evaluateImg_kdt(imgId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n self.evalImgs_unkdt = [self.evaluateImg_unkdt(imgId, areaRng, maxDet)\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n \n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU_kdt_kgt(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._k_gts[imgId,catId]\n dt = self._k_dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._k_gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._k_dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n \n def computeIoU_kdt_okgt(self, imgId, catId):\n p = self.params\n gt = self._ok_gts[imgId, catId]\n dt = self._k_dts[imgId,catId]\n \n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n \n def computeIoU_kdt_unkgt(self, imgId, catId):\n p = self.params\n gt = self._unk_gts[imgId]\n dt = self._k_dts[imgId,catId]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n 
# compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n \n def computeIoU_unkdt_kgt(self, imgId):\n p = self.params\n gt = [_ for cId in p.catIds for _ in self._k_gts[imgId,cId]]\n dt = self._unk_dts[imgId]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n \n def computeIoU_unkdt_unkgt(self, imgId):\n p = self.params\n gt = self._unk_gts[imgId]\n dt = self._unk_dts[imgId]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n\n def evaluateImg_kdt(self, imgId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n\n k_gt = self._k_gts[imgId,catId]\n ok_gt = self._ok_gts[imgId,catId]\n unk_gt = self._unk_gts[imgId]\n k_dt = self._k_dts[imgId,catId]\n\n for kg in k_gt:\n if kg['ignore'] or (kg['area']<aRng[0] or kg['area']>aRng[1]):\n kg['_ignore'] = 1\n else:\n kg['_ignore'] = 0\n for okg in ok_gt:\n if okg['ignore'] or (okg['area']<aRng[0] or okg['area']>aRng[1]):\n okg['_ignore'] = 1\n else:\n okg['_ignore'] = 0\n for ug in unk_gt:\n if ug['ignore'] or (ug['area']<aRng[0] or ug['area']>aRng[1]):\n ug['_ignore'] = 1\n else:\n ug['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n k_gtind = np.argsort([kg['_ignore'] for kg in k_gt], kind='mergesort')\n k_gt = [k_gt[i] for i in k_gtind]\n ok_gtind = np.argsort([okg['_ignore'] for okg in ok_gt], kind='mergesort')\n ok_gt = [ok_gt[i] for i in ok_gtind]\n unk_gtind = np.argsort([ug['_ignore'] for ug in unk_gt], kind='mergesort')\n unk_gt = [unk_gt[i] for i in unk_gtind]\n k_dtind = np.argsort([-kd['score'] for kd in k_dt], kind='mergesort')\n k_dt = [k_dt[i] for i in k_dtind[0:maxDet]]\n k_iscrowd = [int(o['iscrowd']) for o in k_gt]\n ok_iscrowd = [int(o['iscrowd']) for o in ok_gt]\n unk_iscrowd = [int(o['iscrowd']) for o in unk_gt]\n # load computed ious\n ious_kgt = (\n self.ious_kdt_kgt[imgId, catId][:, k_gtind] \\\n if len(self.ious_kdt_kgt[imgId, catId]) > 0 else self.ious_kdt_kgt[imgId, catId]\n )\n ious_okgt = (\n self.ious_kdt_okgt[imgId, catId][:, ok_gtind] \\\n if len(self.ious_kdt_okgt[imgId, catId]) > 0 else self.ious_kdt_okgt[imgId, catId]\n )\n ious_unkgt = (\n self.ious_kdt_unkgt[imgId, catId][:, unk_gtind] \\\n if len(self.ious_kdt_unkgt[imgId, catId]) > 0 else self.ious_kdt_unkgt[imgId, catId]\n )\n\n T = len(p.iouThrs)\n KG = len(k_gt)\n OKG = len(ok_gt)\n UG = 
len(unk_gt)\n KD = len(k_dt)\n kgtm = np.zeros((T,KG))\n okgtm = np.zeros((T,OKG))\n unkgtm = np.zeros((T,UG))\n kdtm_kgt = np.zeros((T,KD))\n kdtm_okgt = np.zeros((T,KD))\n kdtm_unkgt = np.zeros((T,KD))\n kgtIg = np.array([kg['_ignore'] for kg in k_gt])\n okgtIg = np.array([okg['_ignore'] for okg in ok_gt])\n unkgtIg = np.array([ug['_ignore'] for ug in unk_gt])\n kdtIg_kgt = np.zeros((T,KD))\n kdtIg_okgt = np.zeros((T,KD))\n kdtIg_unkgt = np.zeros((T,KD))\n\n if not len(ious_kgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for kdind, kd in enumerate(k_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for kgind, kg in enumerate(k_gt):\n # if this gt already matched, and not a crowd, continue\n if kgtm[tind,kgind]>0 and not k_iscrowd[kgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and kgtIg[m]==0 and kgtIg[kgind]==1:\n break\n # continue to next gt unless better match made\n if ious_kgt[kdind,kgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_kgt[kdind,kgind]\n m=kgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n kdtIg_kgt[tind,kdind] = kgtIg[m]\n kdtm_kgt[tind,kdind] = k_gt[m]['id']\n kgtm[tind,m] = kd['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([kd['area']<aRng[0] or kd['area']>aRng[1] for kd in k_dt]).reshape((1, len(k_dt)))\n kdtIg_kgt = np.logical_or(kdtIg_kgt, np.logical_and(kdtm_kgt==0, np.repeat(a,T,0)))\n\n if not len(ious_okgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for kdind, kd in enumerate(k_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for okgind, okg in enumerate(ok_gt):\n # if this gt already matched, and not a crowd, continue\n if okgtm[tind,okgind]>0 and not ok_iscrowd[okgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and okgtIg[m]==0 and okgtIg[okgind]==1:\n break\n # continue to next gt unless better match made\n if ious_okgt[kdind,okgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_okgt[kdind,okgind]\n m=okgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n kdtIg_okgt[tind,kdind] = okgtIg[m]\n kdtm_okgt[tind,kdind] = ok_gt[m]['id']\n okgtm[tind,m] = kd['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([kd['area']<aRng[0] or kd['area']>aRng[1] for kd in k_dt]).reshape((1, len(k_dt)))\n kdtIg_okgt = np.logical_or(kdtIg_okgt, np.logical_and(kdtm_okgt==0, np.repeat(a,T,0)))\n\n if not len(ious_unkgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for kdind, kd in enumerate(k_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for unkgind, unkg in enumerate(unk_gt):\n # if this gt already matched, and not a crowd, continue\n if unkgtm[tind,unkgind]>0 and not unk_iscrowd[unkgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and unkgtIg[m]==0 and unkgtIg[unkgind]==1:\n break\n # continue to next gt unless better match made\n if ious_unkgt[kdind,unkgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_unkgt[kdind,unkgind]\n m=unkgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n kdtIg_unkgt[tind,kdind] = unkgtIg[m]\n kdtm_unkgt[tind,kdind] = unk_gt[m]['id']\n unkgtm[tind,m] = kd['id']\n # set unmatched detections outside of 
area range to ignore\n a = np.array([kd['area']<aRng[0] or kd['area']>aRng[1] for kd in k_dt]).reshape((1, len(k_dt)))\n kdtIg_unkgt = np.logical_or(kdtIg_unkgt, np.logical_and(kdtm_unkgt==0, np.repeat(a,T,0)))\n\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'kdtIds': [kd['id'] for kd in k_dt],\n 'kgtIds': [kg['id'] for kg in k_gt],\n 'okgtIds': [okg['id'] for okg in ok_gt],\n 'unkgtIds': [ug['id'] for ug in unk_gt],\n 'Matches_kdt_kgt': kdtm_kgt,\n 'Matches_kdt_okgt': kdtm_okgt,\n 'Matches_kdt_unkgt': kdtm_unkgt,\n 'kgtMatches': kgtm,\n 'okgtMatches': okgtm,\n 'unkgtMatches': unkgtm,\n 'kdtScores': [kd['score'] for kd in k_dt],\n 'kgtIgnore': kgtIg,\n 'okgtIgnore': okgtIg,\n 'unkgtIgnore': unkgtIg,\n 'kdtIgnore_kgt': kdtIg_kgt,\n 'kdtIgnore_okgt': kdtIg_okgt,\n 'kdtIgnore_unkgt': kdtIg_unkgt,\n }\n \n def evaluateImg_unkdt(self, imgId, aRng, maxDet):\n '''\n '''\n p = self.params\n k_gt = [_ for cId in p.catIds for _ in self._k_gts[imgId,cId]]\n unk_gt = self._unk_gts[imgId]\n unk_dt = self._unk_dts[imgId]\n if len(unk_gt) == 0 and len(unk_dt) == 0:\n return None\n \n for kg in k_gt:\n if kg['ignore'] or (kg['area']<aRng[0] or kg['area']>aRng[1]):\n kg['_ignore'] = 1\n else:\n kg['_ignore'] = 0\n for ug in unk_gt:\n if ug['ignore'] or (ug['area']<aRng[0] or ug['area']>aRng[1]):\n ug['_ignore'] = 1\n else:\n ug['_ignore'] = 0\n \n # sort dt highest score first, sort gt ignore last\n kgtind = np.argsort([kg['_ignore'] for kg in k_gt], kind='mergesort')\n k_gt = [k_gt[i] for i in kgtind]\n unk_gtind = np.argsort([ug['_ignore'] for ug in unk_gt], kind='mergesort')\n unk_gt = [unk_gt[i] for i in unk_gtind]\n udtind = np.argsort([-ud['score'] for ud in unk_dt], kind='mergesort')\n unk_dt = [unk_dt[i] for i in udtind[0:maxDet]]\n k_iscrowd = [int(o['iscrowd']) for o in k_gt]\n unk_iscrowd = [int(o['iscrowd']) for o in unk_gt]\n\n # load computed ious\n ious_kgt = (\n self.ious_unkdt_kgt[imgId][:, kgtind] \\\n if len(self.ious_unkdt_kgt[imgId]) > 0 else self.ious_unkdt_kgt[imgId]\n )\n ious_unkgt = (\n self.ious_unkdt_unkgt[imgId][:, unk_gtind] \\\n if len(self.ious_unkdt_unkgt[imgId]) > 0 else self.ious_unkdt_unkgt[imgId]\n )\n\n T = len(p.iouThrs)\n KG = len(k_gt)\n UG = len(unk_gt)\n UD = len(unk_dt)\n kgtm = np.zeros((T,KG))\n unkgtm = np.zeros((T,UG))\n unkdtm_kgt = np.zeros((T,UD))\n unkdtm_unkgt = np.zeros((T,UD))\n kgtIg = np.array([g['_ignore'] for g in k_gt])\n unkgtIg = np.array([ug['_ignore'] for ug in unk_gt])\n unkdtIg_kgt = np.zeros((T,UD))\n unkdtIg_unkgt = np.zeros((T,UD))\n\n if not len(ious_kgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for udind, ud in enumerate(unk_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for kgind, kg in enumerate(k_gt):\n # if this gt already matched, and not a crowd, continue\n if kgtm[tind,kgind]>0 and not k_iscrowd[kgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and kgtIg[m]==0 and kgtIg[kgind]==1:\n break\n # continue to next gt unless better match made\n if ious_kgt[udind,kgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_kgt[udind,kgind]\n m=kgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n unkdtIg_kgt[tind,udind] = kgtIg[m]\n unkdtm_kgt[tind,udind] = k_gt[m]['id']\n kgtm[tind,m] = ud['id']\n # set unmatched detections outside of area range to ignore\n a = 
np.array([ud['area']<aRng[0] or ud['area']>aRng[1] for ud in unk_dt]).reshape((1, len(unk_dt)))\n unkdtIg_kgt = np.logical_or(unkdtIg_kgt, np.logical_and(unkdtm_kgt==0, np.repeat(a,T,0)))\n\n if not len(ious_unkgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for udind, ud in enumerate(unk_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for unkgind, unkg in enumerate(unk_gt):\n # if this gt already matched, and not a crowd, continue\n if unkgtm[tind,unkgind]>0 and not unk_iscrowd[unkgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and unkgtIg[m]==0 and unkgtIg[unkgind]==1:\n break\n # continue to next gt unless better match made\n if ious_unkgt[udind,unkgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_unkgt[udind,unkgind]\n m=unkgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n unkdtIg_unkgt[tind,udind] = unkgtIg[m]\n unkdtm_unkgt[tind,udind] = unk_gt[m]['id']\n unkgtm[tind,m] = ud['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([ud['area']<aRng[0] or ud['area']>aRng[1] for ud in unk_dt]).reshape((1, len(unk_dt)))\n unkdtIg_unkgt = np.logical_or(unkdtIg_unkgt, np.logical_and(unkdtm_unkgt==0, np.repeat(a,T,0)))\n\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'unkdtIds': [ud['id'] for ud in unk_dt],\n 'kgtIds': [kg['id'] for kg in k_gt],\n 'unkgtIds': [ug['id'] for ug in unk_gt],\n 'Matches_unkdt_kgt': unkdtm_kgt,\n 'Matches_unkdt_unkgt': unkdtm_unkgt,\n 'kgtMatches': kgtm,\n 'unkgtMatches': unkgtm,\n 'unkdtScores': [ud['score'] for ud in unk_dt],\n 'kgtIgnore': kgtIg,\n 'unkgtIgnore': unkgtIg,\n 'unkdtIgnore_kgt': unkdtIg_kgt,\n 'unkdtIgnore_unkgt': unkdtIg_unkgt,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results of known detections...')\n tic = time.time()\n if not self.evalImgs_kdt or not self.evalImgs_unkdt:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = -np.ones((T,R,K,A,M))\n ok_det_as_known = np.zeros((T,K,A,M))\n unk_det_as_known = np.zeros((T,K,A,M))\n fp_os = np.zeros((T,R,K,A,M))\n tp_plus_fp_cs = np.zeros((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = 
[self.evalImgs_kdt[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['kdtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n \n dtScoresSortedExpand = np.expand_dims(dtScoresSorted, 0)\n dtScoresSortedExpand = np.repeat(dtScoresSortedExpand, T, 0)\n kdtm_kgt = np.concatenate([e['Matches_kdt_kgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtm_okgt = np.concatenate([e['Matches_kdt_okgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtm_unkgt = np.concatenate([e['Matches_kdt_unkgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtIg_kgt = np.concatenate([e['kdtIgnore_kgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtIg_okgt = np.concatenate([e['kdtIgnore_okgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtIg_unkgt = np.concatenate([e['kdtIgnore_unkgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kgtIg = np.concatenate([e['kgtIgnore'] for e in E])\n npig = np.count_nonzero(kgtIg==0)\n if npig == 0:\n continue\n tps = np.logical_and(kdtm_kgt, np.logical_not(kdtIg_kgt) )\n fps = np.logical_and(np.logical_not(kdtm_kgt), np.logical_not(kdtIg_kgt) )\n okfps = np.logical_and(kdtm_okgt, np.logical_not(kdtIg_okgt))\n ufps = np.logical_and(kdtm_unkgt, np.logical_not(kdtIg_unkgt))\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n tp_fp_sum = tp_sum + fp_sum\n okfp_sum = np.sum(okfps, axis=1).astype(dtype=np.float)\n ufp_sum = np.cumsum(ufps, axis=1).astype(dtype=np.float)\n for t, (tp, fp, tp_fp, ufp) in enumerate(zip(tp_sum, fp_sum, tp_fp_sum, ufp_sum)):\n if len(ufp):\n unk_det_as_known[t,k,a,m] = ufp[-1]\n\n ok_det_as_known[t,k,a,m] = okfp_sum[t]\n\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n tf = np.zeros((R,))\n fo = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n l = len(tp_fp)\n if l:\n for ri, pi in enumerate(inds):\n if pi == l:\n pi -= 1\n tf[ri] = tp_fp[pi]\n fo[ri] = ufp[pi]\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n tp_plus_fp_cs[t,:,k,a,m] = np.array(tf)\n fp_os[t,:,k,a,m] = np.array(fo)\n self.eval_kdt = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n 'ok_det_as_known': ok_det_as_known,\n 'unk_det_as_known': unk_det_as_known,\n 'tp_plus_fp_cs': tp_plus_fp_cs,\n 'fp_os': fp_os\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n print('Accumulating evaluation results of unknown detections...')\n tic = time.time()\n if not self.evalImgs_unkdt:\n print('Please run evaluate() first')\n \n precision = -np.ones((T,R,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,A,M))\n scores = -np.ones((T,R,A,M))\n\n num_k_det_as_unk = 
np.zeros((T,A,M))\n\n # retrieve E at each category, area range, and max number of detections\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs_unkdt[Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n udtScores = np.concatenate([e['unkdtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-udtScores, kind='mergesort')\n udtScoresSorted = udtScores[inds]\n\n udtm_kgt = np.concatenate([e['Matches_unkdt_kgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n udtm_unkgt = np.concatenate([e['Matches_unkdt_unkgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n udtIg_kgt = np.concatenate([e['unkdtIgnore_kgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n udtIg_unkgt = np.concatenate([e['unkdtIgnore_unkgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kgtIg = np.concatenate([e['kgtIgnore'] for e in E])\n unkgtIg = np.concatenate([e['unkgtIgnore'] for e in E])\n npig = np.count_nonzero(unkgtIg==0 )\n if npig == 0:\n continue\n\n tps = np.logical_and(udtm_unkgt, np.logical_not(udtIg_unkgt) )\n fps = np.logical_and(np.logical_not(udtm_unkgt), np.logical_not(udtIg_unkgt) )\n k_det_as_unk_fps = np.logical_and(udtm_kgt, np.logical_not(udtIg_kgt))\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=float)\n k_det_as_unk_fp_sum = np.cumsum(k_det_as_unk_fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp, k_det_as_unk_fp) in enumerate(zip(tp_sum, fp_sum, k_det_as_unk_fp_sum)):\n if len(k_det_as_unk_fp):\n num_k_det_as_unk[t,a,m] = k_det_as_unk_fp[-1]\n \n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,a,m] = rc[-1]\n else:\n recall[t,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = udtScoresSorted[pi]\n except:\n pass\n precision[t,:,a,m] = np.array(q)\n scores[t,:,a,m] = np.array(ss)\n \n self.eval_unkdt = {\n 'params': p,\n 'counts': [T, R, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n 'k_det_as_unk': num_k_det_as_unk\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _num_unk_det_as_known(iouThr=None, areaRng='all', maxDets=100):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {}'\n titleStr = 'UNK_det_as_K'\n typeStr = '(AOSE)'\n iouStr = '{:0.2f}'.format(iouThr)\n tind = [i for i, iouT in enumerate(p.iouThrs) if iouT == iouThr]\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n\n unk_det_as_known = self.eval_kdt['unk_det_as_known']\n\n self.unk_det_as_known = unk_det_as_known[tind,:,aind,mind]\n\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, 
np.sum(unk_det_as_known[tind,:,aind,mind])))\n print(unk_det_as_known[tind,:,aind,mind])\n \n return np.sum(unk_det_as_known[tind,:,aind,mind])\n\n def _num_k_det_as_unk(iouThr=None, areaRng='all', maxDets=100):\n p = self.params\n iStr = ' {:<18} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {}'\n titleStr = 'K_det_as_UNK'\n iouStr = '{:0.2f}'.format(iouThr)\n tind = [i for i, iouT in enumerate(p.iouThrs) if iouT == iouThr]\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n\n k_det_as_unk = self.eval_unkdt['k_det_as_unk']\n\n self.k_det_as_unk = k_det_as_unk[tind,aind,mind]\n\n print(iStr.format(titleStr, iouStr, areaRng, maxDets, k_det_as_unk[tind,aind,mind]))\n \n return k_det_as_unk[tind,aind,mind]\n \n def _wi(iouThr=None, areaRng='all', maxDets=100, recall_level=0.8):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Wilderness Impact'\n typeStr = '(WI)'\n iouStr = '{:0.2f}'.format(iouThr)\n\n tind = [i for i, iouT in enumerate(p.iouThrs) if iouT == iouThr]\n rind = [i for i, recT in enumerate(p.recThrs) if recT == recall_level]\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n\n tp_plus_fp_cs = self.eval_kdt['tp_plus_fp_cs']\n fp_os = self.eval_kdt['fp_os']\n\n wi = np.mean(fp_os[tind,rind,:,aind,mind]) / np.mean(tp_plus_fp_cs[tind,rind,:,aind,mind])\n \n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, wi))\n\n return wi\n \n def _print_precision(iouThr=.5, areaRng='all', maxDets=100 ):\n p = self.params\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n\n # dimension of precision: [TxRxKxAxM]\n s = self.eval_kdt['precision']\n # IoU\n\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = np.squeeze(s[:,:,:,aind,mind])\n s = s[[10, 20, 30, 40, 50, 60, 70, 80, 90, 100],:]\n \n for i in range(s.shape[1]):\n print(s[:,i])\n\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Known Average Precision' if ap == 1 else 'Known Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval_kdt['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval_kdt['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n\n def _summarize_unk( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Unknown Average Precision' if ap == 1 else 'Unknown Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is 
None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval_unkdt['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval_unkdt['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n\n def _summarizeDets():\n stats = np.zeros((30,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[-1])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[-1])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[-1])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[-1])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[-1])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, maxDets=self.params.maxDets[3])\n stats[10] = _summarize(0, maxDets=self.params.maxDets[4])\n stats[11] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[-1])\n stats[12] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[-1])\n stats[13] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[-1])\n stats[14] = _wi(iouThr=.5, areaRng='all', maxDets=100, recall_level=0.8)\n stats[15] = _num_unk_det_as_known(iouThr=.5, areaRng='all', maxDets=100)\n \n stats[16] = _summarize_unk(1)\n stats[17] = _summarize_unk(1, iouThr=.5, maxDets=self.params.maxDets[-1])\n stats[18] = _summarize_unk(1, iouThr=.75, maxDets=self.params.maxDets[-1])\n stats[19] = _summarize_unk(1, areaRng='small', maxDets=self.params.maxDets[-1])\n stats[20] = _summarize_unk(1, areaRng='medium', maxDets=self.params.maxDets[-1])\n stats[21] = _summarize_unk(1, areaRng='large', maxDets=self.params.maxDets[-1])\n stats[22] = _summarize_unk(0, maxDets=self.params.maxDets[0])\n stats[23] = _summarize_unk(0, maxDets=self.params.maxDets[1])\n stats[24] = _summarize_unk(0, maxDets=self.params.maxDets[2])\n stats[25] = _summarize_unk(0, maxDets=self.params.maxDets[3])\n stats[26] = _summarize_unk(0, maxDets=self.params.maxDets[4])\n stats[27] = _summarize_unk(0, areaRng='small', maxDets=self.params.maxDets[-1])\n stats[28] = _summarize_unk(0, areaRng='medium', maxDets=self.params.maxDets[-1])\n stats[29] = _summarize_unk(0, areaRng='large', maxDets=self.params.maxDets[-1])\n return stats\n \n if not self.eval_kdt or not self.eval_unkdt:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n self.stats = summarize()" } ]
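The context snippet above closes out the open-set evaluator's accumulate()/summarize() logic. Two of the quantities it reports are easy to lose in the indexing: the Wilderness Impact (WI) and the number of unknown objects detected as known-class objects (printed as AOSE). The sketch below isolates just that arithmetic; it is a minimal illustration, assuming arrays shaped as they are filled in accumulate() (`fp_os` and `tp_plus_fp_cs` over [T, R, K, A, M], `unk_det_as_known` over [T, K, A, M]) and index lists `tind`/`rind`/`aind`/`mind` chosen exactly as in the snippet. It is not part of the evaluator itself.

```python
import numpy as np

# Minimal sketch of the two open-set summaries reported above. The arrays are
# stand-ins for self.eval_kdt['fp_os'], self.eval_kdt['tp_plus_fp_cs'] and
# self.eval_kdt['unk_det_as_known'] produced by accumulate().

def wilderness_impact(fp_os, tp_plus_fp_cs, tind, rind, aind, mind):
    # WI at a fixed IoU threshold and recall level: the fraction of the
    # known-class detection budget that is spent on unknown objects.
    return np.mean(fp_os[tind, rind, :, aind, mind]) / np.mean(
        tp_plus_fp_cs[tind, rind, :, aind, mind]
    )

def absolute_open_set_error(unk_det_as_known, tind, aind, mind):
    # AOSE: total count of unknown ground-truth objects that end up matched
    # to (absorbed by) known-class detections.
    return np.sum(unk_det_as_known[tind, :, aind, mind])
```

In the snippet these correspond to _wi(iouThr=.5, recall_level=0.8) and _num_unk_det_as_known(iouThr=.5), which _summarizeDets() stores in stats[14] and stats[15].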
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pickle import pycocotools.mask as mask_util import torch import detectron2.utils.comm as comm from collections import OrderedDict from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from tabulate import tabulate from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.data.datasets.coco import convert_to_coco_json from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from detectron2.evaluation.evaluator import DatasetEvaluator from detectron2.evaluation.coco_evaluation import instances_to_coco_json from openset_rcnn.data.graspnet_meta import GRASPNET_KNOWN_IDS, GRASPNET_KNOWN_CATEGORIES from .os_cocoeval import OpensetCOCOEval
14907
# Copyright (c) Facebook, Inc. and its affiliates. class OpensetCOCOEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means the metric cannot be computed (e.g. due to no predictions made). In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, eval_type, tasks=None, distributed=True, output_dir=None, *, max_dets_per_image=None, use_fast_impl=True, kpt_oks_sigmas=(), ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and contains all the results in the format they are produced by the model. 2. "coco_instances_results.json" a json file in COCO's result format. max_dets_per_image (list[int]): limit on the maximum number of detections per image. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. """ self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self._use_fast_impl = use_fast_impl self._max_dets_per_image = max_dets_per_image if tasks is not None and isinstance(tasks, CfgNode): kpt_oks_sigmas = ( tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas ) self._logger.warn( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu")
# Copyright (c) Facebook, Inc. and its affiliates. class OpensetCOCOEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means the metric cannot be computed (e.g. due to no predictions made). In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, eval_type, tasks=None, distributed=True, output_dir=None, *, max_dets_per_image=None, use_fast_impl=True, kpt_oks_sigmas=(), ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and contains all the results in the format they are produced by the model. 2. "coco_instances_results.json" a json file in COCO's result format. max_dets_per_image (list[int]): limit on the maximum number of detections per image. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. """ self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self._use_fast_impl = use_fast_impl self._max_dets_per_image = max_dets_per_image if tasks is not None and isinstance(tasks, CfgNode): kpt_oks_sigmas = ( tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas ) self._logger.warn( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu")
self.known_names = GRASPNET_KNOWN_CATEGORIES
1
2023-11-21 01:47:01+00:00
24k
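Taken together, the fields of the row above describe a detectron2-based open-set evaluator whose core numbers come from OpensetCOCOEval (imported in the row's import block). The snippet's own guards, "Please run evaluate() first" and "Please run accumulate() first", imply the usual COCO-style call order. The driver below is a hypothetical usage sketch only: the constructor signature and the annotation/result paths are assumptions, not taken from the source; only the method names, the 'bbox'/'segm' iouType values, and the .stats attribute appear in the snippet.

```python
from pycocotools.coco import COCO
# Module name taken from the row's relative import (`from .os_cocoeval import
# OpensetCOCOEval`); the package prefix is omitted here and assumed importable.
from os_cocoeval import OpensetCOCOEval

coco_gt = COCO("annotations.json")             # placeholder ground-truth file
coco_dt = coco_gt.loadRes("predictions.json")  # placeholder detection results

evaluator = OpensetCOCOEval(coco_gt, coco_dt, iouType="bbox")  # assumed signature
evaluator.evaluate()    # per-image matching of known/unknown detections and GT
evaluator.accumulate()  # fills the eval_kdt / eval_unkdt tables used above
evaluator.summarize()   # prints known/unknown AP and AR plus WI and AOSE
print(evaluator.stats)  # the 30-entry vector built by _summarizeDets()
```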
jiawei-ren/dreamgaussian4d
diffusers/src/diffusers/models/controlnet_flax.py
[ { "identifier": "ConfigMixin", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and\n saving classes that inherit from [`ConfigMixin`].\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129\n\n Tihs funtion is mostly copied from PyTorch's __getattr__ overwrite:\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n return self._internal_dict[name]\n\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file is saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary.\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class is instantiated. Make sure to only load configuration\n files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it is loaded) and initiate the Python class.\n `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually\n overwrite the same named arguments in `config`.\n\n Returns:\n [`ModelMixin`] or [`SchedulerMixin`]:\n A model or scheduler object instantiated from a config dictionary.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. 
This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Load a model or scheduler configuration.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with\n [`~ConfigMixin.save_config`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. 
If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config are returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the `commit_hash` of the loaded configuration are returned.\n\n Returns:\n `dict`:\n A dictionary of all the parameters stored in a JSON configuration file.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # Skip keys that were not present in the original config, so default __init__ values were used\n used_defaults = config_dict.get(\"_use_default_values\", [])\n config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != \"_use_default_values\"}\n\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if (\n isinstance(orig_cls_name, str)\n and orig_cls_name != cls.__name__\n and hasattr(diffusers_library, orig_cls_name)\n ):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)):\n raise ValueError(\n \"Make sure that the `_class_name` is of type string or list of string (for custom pipelines).\"\n )\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. 
Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes the configuration instance to a JSON string.\n\n Returns:\n `str`:\n String containing all the attributes that make up the configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n # Don't save \"_ignore_files\" or \"_use_default_values\"\n config_dict.pop(\"_ignore_files\", None)\n config_dict.pop(\"_use_default_values\", None)\n\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save the configuration instance's parameters to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file to save a configuration instance's parameters.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "flax_register_to_config", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "def flax_register_to_config(cls):\n original_init = cls.__init__\n\n @functools.wraps(original_init)\n def init(self, *args, **kwargs):\n if not isinstance(self, ConfigMixin):\n raise RuntimeError(\n f\"`@register_for_config` was applied to 
{self.__class__.__name__} init method, but this class does \"\n \"not inherit from `ConfigMixin`.\"\n )\n\n # Ignore private kwargs in the init. Retrieve all passed attributes\n init_kwargs = dict(kwargs.items())\n\n # Retrieve default values\n fields = dataclasses.fields(self)\n default_kwargs = {}\n for field in fields:\n # ignore flax specific attributes\n if field.name in self._flax_internal_args:\n continue\n if type(field.default) == dataclasses._MISSING_TYPE:\n default_kwargs[field.name] = None\n else:\n default_kwargs[field.name] = getattr(self, field.name)\n\n # Make sure init_kwargs override default kwargs\n new_kwargs = {**default_kwargs, **init_kwargs}\n # dtype should be part of `init_kwargs`, but not `new_kwargs`\n if \"dtype\" in new_kwargs:\n new_kwargs.pop(\"dtype\")\n\n # Get positional arguments aligned with kwargs\n for i, arg in enumerate(args):\n name = fields[i].name\n new_kwargs[name] = arg\n\n # Take note of the parameters that were not present in the loaded config\n if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0:\n new_kwargs[\"_use_default_values\"] = list(set(new_kwargs.keys()) - set(init_kwargs))\n\n getattr(self, \"register_to_config\")(**new_kwargs)\n original_init(self, *args, **kwargs)\n\n cls.__init__ = init\n return cls" }, { "identifier": "BaseOutput", "path": "diffusers/src/diffusers/utils/outputs.py", "snippet": "class BaseOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n Python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple\n first.\n\n </Tip>\n \"\"\"\n\n def __init_subclass__(cls) -> None:\n \"\"\"Register subclasses as pytree nodes.\n\n This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with\n `static_graph=True` with modules that output `ModelOutput` subclasses.\n \"\"\"\n if is_torch_available():\n import torch.utils._pytree\n\n torch.utils._pytree._register_pytree_node(\n cls,\n torch.utils._pytree._dict_flatten,\n lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),\n )\n\n def __post_init__(self) -> None:\n class_fields = fields(self)\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and isinstance(first_field, dict):\n for key, value in first_field.items():\n self[key] = value\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\")\n\n def __getitem__(self, k: Any) -> Any:\n if isinstance(k, str):\n inner_dict = 
dict(self.items())\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name: Any, value: Any) -> None:\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def __reduce__(self):\n if not is_dataclass(self):\n return super().__reduce__()\n callable, _args, *remaining = super().__reduce__()\n args = tuple(getattr(self, field.name) for field in fields(self))\n return callable, args, *remaining\n\n def to_tuple(self) -> Tuple[Any, ...]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())" }, { "identifier": "FlaxTimestepEmbedding", "path": "diffusers/src/diffusers/models/embeddings_flax.py", "snippet": "class FlaxTimestepEmbedding(nn.Module):\n r\"\"\"\n Time step Embedding Module. Learns embeddings for input time steps.\n\n Args:\n time_embed_dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n\n time_embed_dim: int = 32\n dtype: jnp.dtype = jnp.float32\n\n @nn.compact\n def __call__(self, temb):\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_1\")(temb)\n temb = nn.silu(temb)\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_2\")(temb)\n return temb" }, { "identifier": "FlaxTimesteps", "path": "diffusers/src/diffusers/models/embeddings_flax.py", "snippet": "class FlaxTimesteps(nn.Module):\n r\"\"\"\n Wrapper Module for sinusoidal Time step Embeddings as described in https://arxiv.org/abs/2006.11239\n\n Args:\n dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n \"\"\"\n\n dim: int = 32\n flip_sin_to_cos: bool = False\n freq_shift: float = 1\n\n @nn.compact\n def __call__(self, timesteps):\n return get_sinusoidal_embeddings(\n timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift\n )" }, { "identifier": "FlaxModelMixin", "path": "diffusers/src/diffusers/models/modeling_flax_utils.py", "snippet": "class FlaxModelMixin(PushToHubMixin):\n r\"\"\"\n Base class for all Flax models.\n\n [`FlaxModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and\n saving models.\n\n - **config_name** ([`str`]) -- Filename to save a model to when calling [`~FlaxModelMixin.save_pretrained`].\n \"\"\"\n\n config_name = CONFIG_NAME\n _automatically_saved_args = [\"_diffusers_version\", \"_class_name\", \"_name_or_path\"]\n _flax_internal_args = [\"name\", \"parent\", \"dtype\"]\n\n @classmethod\n def _from_config(cls, config, **kwargs):\n \"\"\"\n All context managers that the model should be initialized under go here.\n \"\"\"\n return cls(config, **kwargs)\n\n def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:\n \"\"\"\n Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`.\n \"\"\"\n\n # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27\n def conditional_cast(param):\n if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, 
jnp.floating):\n param = param.astype(dtype)\n return param\n\n if mask is None:\n return jax.tree_map(conditional_cast, params)\n\n flat_params = flatten_dict(params)\n flat_mask, _ = jax.tree_flatten(mask)\n\n for masked, key in zip(flat_mask, flat_params.keys()):\n if masked:\n param = flat_params[key]\n flat_params[key] = conditional_cast(param)\n\n return unflatten_dict(flat_params)\n\n def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n r\"\"\"\n Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast\n the `params` in place.\n\n This method can be used on a TPU to explicitly convert the model parameters to bfloat16 precision to do full\n half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.\n\n Arguments:\n params (`Union[Dict, FrozenDict]`):\n A `PyTree` of model parameters.\n mask (`Union[Dict, FrozenDict]`):\n A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True`\n for params you want to cast, and `False` for those you want to skip.\n\n Examples:\n\n ```python\n >>> from diffusers import FlaxUNet2DConditionModel\n\n >>> # load model\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision\n >>> params = model.to_bf16(params)\n >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)\n >>> # then pass the mask as follows\n >>> from flax import traverse_util\n\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> flat_params = traverse_util.flatten_dict(params)\n >>> mask = {\n ... path: (path[-2] != (\"LayerNorm\", \"bias\") and path[-2:] != (\"LayerNorm\", \"scale\"))\n ... for path in flat_params\n ... }\n >>> mask = traverse_util.unflatten_dict(mask)\n >>> params = model.to_bf16(params, mask)\n ```\"\"\"\n return self._cast_floating_to(params, jnp.bfloat16, mask)\n\n def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):\n r\"\"\"\n Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the\n model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place.\n\n Arguments:\n params (`Union[Dict, FrozenDict]`):\n A `PyTree` of model parameters.\n mask (`Union[Dict, FrozenDict]`):\n A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True`\n for params you want to cast, and `False` for those you want to skip.\n\n Examples:\n\n ```python\n >>> from diffusers import FlaxUNet2DConditionModel\n\n >>> # Download model and configuration from huggingface.co\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> # By default, the model params will be in fp32, to illustrate the use of this method,\n >>> # we'll first cast to fp16 and back to fp32\n >>> params = model.to_f16(params)\n >>> # now cast back to fp32\n >>> params = model.to_fp32(params)\n ```\"\"\"\n return self._cast_floating_to(params, jnp.float32, mask)\n\n def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n r\"\"\"\n Cast the floating-point `params` to `jax.numpy.float16`. 
This returns a new `params` tree and does not cast the\n `params` in place.\n\n This method can be used on a GPU to explicitly convert the model parameters to float16 precision to do full\n half-precision training or to save weights in float16 for inference in order to save memory and improve speed.\n\n Arguments:\n params (`Union[Dict, FrozenDict]`):\n A `PyTree` of model parameters.\n mask (`Union[Dict, FrozenDict]`):\n A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True`\n for params you want to cast, and `False` for those you want to skip.\n\n Examples:\n\n ```python\n >>> from diffusers import FlaxUNet2DConditionModel\n\n >>> # load model\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> # By default, the model params will be in fp32, to cast these to float16\n >>> params = model.to_fp16(params)\n >>> # If you want don't want to cast certain parameters (for example layer norm bias and scale)\n >>> # then pass the mask as follows\n >>> from flax import traverse_util\n\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> flat_params = traverse_util.flatten_dict(params)\n >>> mask = {\n ... path: (path[-2] != (\"LayerNorm\", \"bias\") and path[-2:] != (\"LayerNorm\", \"scale\"))\n ... for path in flat_params\n ... }\n >>> mask = traverse_util.unflatten_dict(mask)\n >>> params = model.to_fp16(params, mask)\n ```\"\"\"\n return self._cast_floating_to(params, jnp.float16, mask)\n\n def init_weights(self, rng: jax.Array) -> Dict:\n raise NotImplementedError(f\"init_weights method has to be implemented for {self}\")\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n dtype: jnp.dtype = jnp.float32,\n *model_args,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a pretrained Flax model from a pretrained model configuration.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n Can be either:\n\n - A string, the *model id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained model\n hosted on the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved\n using [`~FlaxModelMixin.save_pretrained`].\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If\n specified, all the computation will be performed with the given `dtype`.\n\n <Tip>\n\n This only specifies the dtype of the *computation* and does not influence the dtype of model\n parameters.\n\n If you wish to change the dtype of the model parameters, see [`~FlaxModelMixin.to_fp16`] and\n [`~FlaxModelMixin.to_bf16`].\n\n </Tip>\n\n model_args (sequence of positional arguments, *optional*):\n All remaining positional arguments are passed to the underlying model's `__init__` method.\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n from_pt (`bool`, *optional*, defaults to `False`):\n Load the model weights from a PyTorch checkpoint save file.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it is loaded) and initiate the model (for\n example, `output_attentions=True`). Behaves differently depending on whether a `config` is provided or\n automatically loaded:\n\n - If a configuration is provided with `config`, `kwargs` are directly passed to the underlying\n model's `__init__` method (we assume all relevant updates to the configuration have already been\n done).\n - If a configuration is not provided, `kwargs` are first passed to the configuration class\n initialization function [`~ConfigMixin.from_config`]. 
Each key of the `kwargs` that corresponds\n to a configuration attribute is used to override said attribute with the supplied `kwargs` value.\n Remaining keys that do not correspond to any configuration attribute are passed to the underlying\n model's `__init__` function.\n\n Examples:\n\n ```python\n >>> from diffusers import FlaxUNet2DConditionModel\n\n >>> # Download model and configuration from huggingface.co and cache.\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"./test/saved_model/\")\n ```\n\n If you get the error message below, you need to finetune the weights for your downstream task:\n\n ```bash\n Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:\n - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated\n You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n ```\n \"\"\"\n config = kwargs.pop(\"config\", None)\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n from_pt = kwargs.pop(\"from_pt\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n\n user_agent = {\n \"diffusers\": __version__,\n \"file_type\": \"model\",\n \"framework\": \"flax\",\n }\n\n # Load config if we don't provide one\n if config is None:\n config, unused_kwargs = cls.load_config(\n pretrained_model_name_or_path,\n cache_dir=cache_dir,\n return_unused_kwargs=True,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n **kwargs,\n )\n\n model, model_kwargs = cls.from_config(config, dtype=dtype, return_unused_kwargs=True, **unused_kwargs)\n\n # Load model\n pretrained_path_with_subfolder = (\n pretrained_model_name_or_path\n if subfolder is None\n else os.path.join(pretrained_model_name_or_path, subfolder)\n )\n if os.path.isdir(pretrained_path_with_subfolder):\n if from_pt:\n if not os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):\n raise EnvironmentError(\n f\"Error no file named {WEIGHTS_NAME} found in directory {pretrained_path_with_subfolder} \"\n )\n model_file = os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)\n elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)):\n # Load from a Flax checkpoint\n model_file = os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)\n # Check if pytorch weights exist instead\n elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):\n raise EnvironmentError(\n f\"{WEIGHTS_NAME} file found in directory {pretrained_path_with_subfolder}. 
Please load the model\"\n \" using `from_pt=True`.\"\n )\n else:\n raise EnvironmentError(\n f\"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory \"\n f\"{pretrained_path_with_subfolder}.\"\n )\n else:\n try:\n model_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=FLAX_WEIGHTS_NAME if not from_pt else WEIGHTS_NAME,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier \"\n \"listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a \"\n \"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli \"\n \"login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for \"\n \"this model name. Check the model page at \"\n f\"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n f\"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\\n\"\n f\"{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\\nCheckout your\"\n \" internet connection or see how to run the library in offline mode at\"\n \" 'https://huggingface.co/docs/transformers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. \"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\"\n )\n\n if from_pt:\n if is_torch_available():\n from .modeling_utils import load_state_dict\n else:\n raise EnvironmentError(\n \"Can't load the model in PyTorch format because PyTorch is not installed. \"\n \"Please, install PyTorch or use native Flax weights.\"\n )\n\n # Step 1: Get the pytorch file\n pytorch_model_file = load_state_dict(model_file)\n\n # Step 2: Convert the weights\n state = convert_pytorch_state_dict_to_flax(pytorch_model_file, model)\n else:\n try:\n with open(model_file, \"rb\") as state_f:\n state = from_bytes(cls, state_f.read())\n except (UnpicklingError, msgpack.exceptions.ExtraData) as e:\n try:\n with open(model_file) as f:\n if f.read().startswith(\"version\"):\n raise OSError(\n \"You seem to have cloned a repository without having git-lfs installed. 
Please\"\n \" install git-lfs and run `git lfs install` followed by `git lfs pull` in the\"\n \" folder you cloned.\"\n )\n else:\n raise ValueError from e\n except (UnicodeDecodeError, ValueError):\n raise EnvironmentError(f\"Unable to convert {model_file} to Flax deserializable object. \")\n # make sure all arrays are stored as jnp.ndarray\n # NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4:\n # https://github.com/google/flax/issues/1261\n state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend=\"cpu\")[0]), state)\n\n # flatten dicts\n state = flatten_dict(state)\n\n params_shape_tree = jax.eval_shape(model.init_weights, rng=jax.random.PRNGKey(0))\n required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys())\n\n shape_state = flatten_dict(unfreeze(params_shape_tree))\n\n missing_keys = required_params - set(state.keys())\n unexpected_keys = set(state.keys()) - required_params\n\n if missing_keys:\n logger.warning(\n f\"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. \"\n \"Make sure to call model.init_weights to initialize the missing weights.\"\n )\n cls._missing_keys = missing_keys\n\n for key in state.keys():\n if key in shape_state and state[key].shape != shape_state[key].shape:\n raise ValueError(\n f\"Trying to load the pretrained weight for {key} failed: checkpoint has shape \"\n f\"{state[key].shape} which is incompatible with the model shape {shape_state[key].shape}. \"\n )\n\n # remove unexpected keys to not be saved again\n for unexpected_key in unexpected_keys:\n del state[unexpected_key]\n\n if len(unexpected_keys) > 0:\n logger.warning(\n f\"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when\"\n f\" initializing {model.__class__.__name__}: {unexpected_keys}\\n- This IS expected if you are\"\n f\" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or\"\n \" with another architecture.\"\n )\n else:\n logger.info(f\"All model checkpoint weights were used when initializing {model.__class__.__name__}.\\n\")\n\n if len(missing_keys) > 0:\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\\nYou should probably\"\n \" TRAIN this model on a down-stream task to be able to use it for predictions and inference.\"\n )\n else:\n logger.info(\n f\"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path}.\\nIf your task is similar to the task the model of the checkpoint\"\n f\" was trained on, you can already use {model.__class__.__name__} for predictions without further\"\n \" training.\"\n )\n\n return model, unflatten_dict(state)\n\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n params: Union[Dict, FrozenDict],\n is_main_process: bool = True,\n push_to_hub: bool = False,\n **kwargs,\n ):\n \"\"\"\n Save a model and its configuration file to a directory so that it can be reloaded using the\n [`~FlaxModelMixin.from_pretrained`] class method.\n\n Arguments:\n save_directory (`str` or `os.PathLike`):\n Directory to save a model and its configuration file to. 
Will be created if it doesn't exist.\n params (`Union[Dict, FrozenDict]`):\n A `PyTree` of model parameters.\n is_main_process (`bool`, *optional*, defaults to `True`):\n Whether the process calling this is the main process or not. Useful during distributed training and you\n need to call this function on all processes. In this case, set `is_main_process=True` only on the main\n process to avoid race conditions.\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n logger.error(f\"Provided path ({save_directory}) should be a directory, not a file\")\n return\n\n os.makedirs(save_directory, exist_ok=True)\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n model_to_save = self\n\n # Attach architecture to the config\n # Save the config\n if is_main_process:\n model_to_save.save_config(save_directory)\n\n # save model\n output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME)\n with open(output_model_file, \"wb\") as f:\n model_bytes = to_bytes(params)\n f.write(model_bytes)\n\n logger.info(f\"Model weights saved in {output_model_file}\")\n\n if push_to_hub:\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )" }, { "identifier": "FlaxCrossAttnDownBlock2D", "path": "diffusers/src/diffusers/models/unet_2d_blocks_flax.py", "snippet": "class FlaxCrossAttnDownBlock2D(nn.Module):\n r\"\"\"\n Cross Attention 2D Downsizing block - original architecture from Unet transformers:\n https://arxiv.org/abs/2103.06104\n\n Parameters:\n in_channels (:obj:`int`):\n Input channels\n out_channels (:obj:`int`):\n Output channels\n dropout (:obj:`float`, *optional*, defaults to 0.0):\n Dropout rate\n num_layers (:obj:`int`, *optional*, defaults to 1):\n Number of attention blocks layers\n num_attention_heads (:obj:`int`, *optional*, defaults to 1):\n Number of attention heads of each spatial transformer block\n add_downsample (:obj:`bool`, *optional*, defaults to `True`):\n Whether to add downsampling layer before each final output\n use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):\n enable memory efficient attention https://arxiv.org/abs/2112.05682\n split_head_dim (`bool`, *optional*, defaults to `False`):\n Whether to split the head dimension into a new axis for the self-attention computation. 
In most cases,\n enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n\n in_channels: int\n out_channels: int\n dropout: float = 0.0\n num_layers: int = 1\n num_attention_heads: int = 1\n add_downsample: bool = True\n use_linear_projection: bool = False\n only_cross_attention: bool = False\n use_memory_efficient_attention: bool = False\n split_head_dim: bool = False\n dtype: jnp.dtype = jnp.float32\n transformer_layers_per_block: int = 1\n\n def setup(self):\n resnets = []\n attentions = []\n\n for i in range(self.num_layers):\n in_channels = self.in_channels if i == 0 else self.out_channels\n\n res_block = FlaxResnetBlock2D(\n in_channels=in_channels,\n out_channels=self.out_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n resnets.append(res_block)\n\n attn_block = FlaxTransformer2DModel(\n in_channels=self.out_channels,\n n_heads=self.num_attention_heads,\n d_head=self.out_channels // self.num_attention_heads,\n depth=self.transformer_layers_per_block,\n use_linear_projection=self.use_linear_projection,\n only_cross_attention=self.only_cross_attention,\n use_memory_efficient_attention=self.use_memory_efficient_attention,\n split_head_dim=self.split_head_dim,\n dtype=self.dtype,\n )\n attentions.append(attn_block)\n\n self.resnets = resnets\n self.attentions = attentions\n\n if self.add_downsample:\n self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)\n\n def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):\n output_states = ()\n\n for resnet, attn in zip(self.resnets, self.attentions):\n hidden_states = resnet(hidden_states, temb, deterministic=deterministic)\n hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)\n output_states += (hidden_states,)\n\n if self.add_downsample:\n hidden_states = self.downsamplers_0(hidden_states)\n output_states += (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "FlaxDownBlock2D", "path": "diffusers/src/diffusers/models/unet_2d_blocks_flax.py", "snippet": "class FlaxDownBlock2D(nn.Module):\n r\"\"\"\n Flax 2D downsizing block\n\n Parameters:\n in_channels (:obj:`int`):\n Input channels\n out_channels (:obj:`int`):\n Output channels\n dropout (:obj:`float`, *optional*, defaults to 0.0):\n Dropout rate\n num_layers (:obj:`int`, *optional*, defaults to 1):\n Number of attention blocks layers\n add_downsample (:obj:`bool`, *optional*, defaults to `True`):\n Whether to add downsampling layer before each final output\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n\n in_channels: int\n out_channels: int\n dropout: float = 0.0\n num_layers: int = 1\n add_downsample: bool = True\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n resnets = []\n\n for i in range(self.num_layers):\n in_channels = self.in_channels if i == 0 else self.out_channels\n\n res_block = FlaxResnetBlock2D(\n in_channels=in_channels,\n out_channels=self.out_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n resnets.append(res_block)\n self.resnets = resnets\n\n if self.add_downsample:\n self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)\n\n def __call__(self, hidden_states, temb, deterministic=True):\n output_states = ()\n\n for resnet in self.resnets:\n hidden_states = resnet(hidden_states, temb, deterministic=deterministic)\n 
output_states += (hidden_states,)\n\n if self.add_downsample:\n hidden_states = self.downsamplers_0(hidden_states)\n output_states += (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "FlaxUNetMidBlock2DCrossAttn", "path": "diffusers/src/diffusers/models/unet_2d_blocks_flax.py", "snippet": "class FlaxUNetMidBlock2DCrossAttn(nn.Module):\n r\"\"\"\n Cross Attention 2D Mid-level block - original architecture from Unet transformers: https://arxiv.org/abs/2103.06104\n\n Parameters:\n in_channels (:obj:`int`):\n Input channels\n dropout (:obj:`float`, *optional*, defaults to 0.0):\n Dropout rate\n num_layers (:obj:`int`, *optional*, defaults to 1):\n Number of attention blocks layers\n num_attention_heads (:obj:`int`, *optional*, defaults to 1):\n Number of attention heads of each spatial transformer block\n use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):\n enable memory efficient attention https://arxiv.org/abs/2112.05682\n split_head_dim (`bool`, *optional*, defaults to `False`):\n Whether to split the head dimension into a new axis for the self-attention computation. In most cases,\n enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n\n in_channels: int\n dropout: float = 0.0\n num_layers: int = 1\n num_attention_heads: int = 1\n use_linear_projection: bool = False\n use_memory_efficient_attention: bool = False\n split_head_dim: bool = False\n dtype: jnp.dtype = jnp.float32\n transformer_layers_per_block: int = 1\n\n def setup(self):\n # there is always at least one resnet\n resnets = [\n FlaxResnetBlock2D(\n in_channels=self.in_channels,\n out_channels=self.in_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n ]\n\n attentions = []\n\n for _ in range(self.num_layers):\n attn_block = FlaxTransformer2DModel(\n in_channels=self.in_channels,\n n_heads=self.num_attention_heads,\n d_head=self.in_channels // self.num_attention_heads,\n depth=self.transformer_layers_per_block,\n use_linear_projection=self.use_linear_projection,\n use_memory_efficient_attention=self.use_memory_efficient_attention,\n split_head_dim=self.split_head_dim,\n dtype=self.dtype,\n )\n attentions.append(attn_block)\n\n res_block = FlaxResnetBlock2D(\n in_channels=self.in_channels,\n out_channels=self.in_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n resnets.append(res_block)\n\n self.resnets = resnets\n self.attentions = attentions\n\n def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet in zip(self.attentions, self.resnets[1:]):\n hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)\n hidden_states = resnet(hidden_states, temb, deterministic=deterministic)\n\n return hidden_states" } ]
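The context list above closes with the docstrings for the Flax UNet building blocks (FlaxCrossAttnDownBlock2D, FlaxDownBlock2D, FlaxUNetMidBlock2DCrossAttn). As a rough, hedged illustration of how such a block is exercised on its own, here is a minimal sketch; the import path follows the identifier paths listed above, and the channel sizes, spatial size, and time-embedding width are invented example values, not taken from this row:

```python
import jax
import jax.numpy as jnp

# Module path follows the identifier path listed above; this assumes a
# diffusers install with the Flax/JAX extras available.
from diffusers.models.unet_2d_blocks_flax import FlaxDownBlock2D

block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=2)

sample = jnp.zeros((1, 64, 64, 32))   # the Flax blocks use NHWC layout
temb = jnp.zeros((1, 1280))           # time embedding; the width here is arbitrary
params = block.init(jax.random.PRNGKey(0), sample, temb)

# Returns the downsampled hidden states plus the per-resnet skip connections.
hidden_states, output_states = block.apply(params, sample, temb)
print(hidden_states.shape)  # (1, 32, 32, 64) after the stride-2 downsample
```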
from typing import Optional, Tuple, Union
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
16,545
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. @flax.struct.dataclass class FlaxControlNetOutput(BaseOutput): """ The output of [`FlaxControlNetModel`]. Args: down_block_res_samples (`jnp.ndarray`): mid_block_res_sample (`jnp.ndarray`): """ down_block_res_samples: jnp.ndarray mid_block_res_sample: jnp.ndarray class FlaxControlNetConditioningEmbedding(nn.Module): conditioning_embedding_channels: int block_out_channels: Tuple[int, ...] = (16, 32, 96, 256) dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.conv_in = nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks = [] for i in range(len(self.block_out_channels) - 1): channel_in = self.block_out_channels[i] channel_out = self.block_out_channels[i + 1] conv1 = nn.Conv( channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(conv1) conv2 = nn.Conv( channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(conv2) self.blocks = blocks self.conv_out = nn.Conv( self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray: embedding = self.conv_in(conditioning) embedding = nn.silu(embedding) for block in self.blocks: embedding = block(embedding) embedding = nn.silu(embedding) embedding = self.conv_out(embedding) return embedding @flax_register_to_config
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. @flax.struct.dataclass class FlaxControlNetOutput(BaseOutput): """ The output of [`FlaxControlNetModel`]. Args: down_block_res_samples (`jnp.ndarray`): mid_block_res_sample (`jnp.ndarray`): """ down_block_res_samples: jnp.ndarray mid_block_res_sample: jnp.ndarray class FlaxControlNetConditioningEmbedding(nn.Module): conditioning_embedding_channels: int block_out_channels: Tuple[int, ...] = (16, 32, 96, 256) dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.conv_in = nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks = [] for i in range(len(self.block_out_channels) - 1): channel_in = self.block_out_channels[i] channel_out = self.block_out_channels[i + 1] conv1 = nn.Conv( channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(conv1) conv2 = nn.Conv( channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(conv2) self.blocks = blocks self.conv_out = nn.Conv( self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray: embedding = self.conv_in(conditioning) embedding = nn.silu(embedding) for block in self.blocks: embedding = block(embedding) embedding = nn.silu(embedding) embedding = self.conv_out(embedding) return embedding @flax_register_to_config
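The cropped_code/all_code fields above define FlaxControlNetConditioningEmbedding, whose stride-2 convolutions bring the conditioning image down to the latent resolution. A small sketch of that shape behaviour, assuming the class can be imported from the controlnet_flax module the imports above belong to; the 320-channel width and 512x512 input are example values:

```python
import jax
import jax.numpy as jnp

# Import path inferred from the module shown above (controlnet_flax); this assumes
# a diffusers version that ships the Flax ControlNet implementation.
from diffusers.models.controlnet_flax import FlaxControlNetConditioningEmbedding

# 320 is only an example output width (it matches the first Stable Diffusion UNet block).
embed = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=320)

cond = jnp.zeros((1, 512, 512, 3))  # NHWC conditioning image (e.g. a Canny edge map)
params = embed.init(jax.random.PRNGKey(0), cond)
out = embed.apply(params, cond)
print(out.shape)  # (1, 64, 64, 320): three stride-2 convs reduce 512 -> 64
```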
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
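The next_line target is the FlaxControlNetModel class declaration. For orientation, a hedged sketch of how such a Flax model is usually loaded through the from_pretrained API documented in the context above; the checkpoint id is only an example:

```python
import jax.numpy as jnp
from diffusers import FlaxControlNetModel

# from_pretrained returns the model definition together with its parameters,
# mirroring the FlaxUNet2DConditionModel example in the docstring above.
# The checkpoint id is illustrative; from_pt=True converts PyTorch weights.
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
)
```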
5
2023-12-28 08:17:40+00:00
24k
FoundationVision/UniRef
detectron2/evaluation/coco_evaluation.py
[ { "identifier": "CfgNode", "path": "detectron2/config/config.py", "snippet": "class CfgNode(_CfgNode):\n \"\"\"\n The same as `fvcore.common.config.CfgNode`, but different in:\n\n 1. Use unsafe yaml loading by default.\n Note that this may lead to arbitrary code execution: you must not\n load a config file from untrusted sources before manually inspecting\n the content of the file.\n 2. Support config versioning.\n When attempting to merge an old config, it will convert the old config automatically.\n\n .. automethod:: clone\n .. automethod:: freeze\n .. automethod:: defrost\n .. automethod:: is_frozen\n .. automethod:: load_yaml_with_base\n .. automethod:: merge_from_list\n .. automethod:: merge_from_other_cfg\n \"\"\"\n\n @classmethod\n def _open_cfg(cls, filename):\n return PathManager.open(filename, \"r\")\n\n # Note that the default value of allow_unsafe is changed to True\n def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:\n \"\"\"\n Load content from the given config file and merge it into self.\n\n Args:\n cfg_filename: config filename\n allow_unsafe: allow unsafe yaml syntax\n \"\"\"\n assert PathManager.isfile(cfg_filename), f\"Config file '{cfg_filename}' does not exist!\"\n loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)\n loaded_cfg = type(self)(loaded_cfg)\n\n # defaults.py needs to import CfgNode\n from .defaults import _C\n\n latest_ver = _C.VERSION\n assert (\n latest_ver == self.VERSION\n ), \"CfgNode.merge_from_file is only allowed on a config object of latest version!\"\n\n logger = logging.getLogger(__name__)\n\n loaded_ver = loaded_cfg.get(\"VERSION\", None)\n if loaded_ver is None:\n from .compat import guess_version\n\n loaded_ver = guess_version(loaded_cfg, cfg_filename)\n assert loaded_ver <= self.VERSION, \"Cannot merge a v{} config into a v{} config.\".format(\n loaded_ver, self.VERSION\n )\n\n if loaded_ver == self.VERSION:\n self.merge_from_other_cfg(loaded_cfg)\n else:\n # compat.py needs to import CfgNode\n from .compat import upgrade_config, downgrade_config\n\n logger.warning(\n \"Loading an old v{} config file '{}' by automatically upgrading to v{}. 
\"\n \"See docs/CHANGELOG.md for instructions to update your files.\".format(\n loaded_ver, cfg_filename, self.VERSION\n )\n )\n # To convert, first obtain a full config at an old version\n old_self = downgrade_config(self, to_version=loaded_ver)\n old_self.merge_from_other_cfg(loaded_cfg)\n new_config = upgrade_config(old_self)\n self.clear()\n self.update(new_config)\n\n def dump(self, *args, **kwargs):\n \"\"\"\n Returns:\n str: a yaml string representation of the config\n \"\"\"\n # to make it show up in docs\n return super().dump(*args, **kwargs)" }, { "identifier": "MetadataCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" }, { "identifier": "convert_to_coco_json", "path": "detectron2/data/datasets/coco.py", "snippet": "def convert_to_coco_json(dataset_name, output_file, allow_cached=True):\n \"\"\"\n Converts dataset into COCO format and saves it to a json file.\n dataset_name must be registered in DatasetCatalog and in detectron2's standard format.\n\n Args:\n dataset_name:\n reference from the config file to the catalogs\n must be registered in DatasetCatalog and in detectron2's standard format\n output_file: path of json file that will be saved to\n allow_cached: if json file is already present then skip conversion\n \"\"\"\n\n # TODO: The dataset or the conversion script *may* change,\n # a checksum would be useful for validating the cached data\n\n PathManager.mkdirs(os.path.dirname(output_file))\n with file_lock(output_file):\n if PathManager.exists(output_file) and allow_cached:\n logger.warning(\n f\"Using previously cached COCO format annotations at '{output_file}'. \"\n \"You need to clear the cache file if your dataset has been modified.\"\n )\n else:\n logger.info(f\"Converting annotations of dataset '{dataset_name}' to COCO format ...)\")\n coco_dict = convert_to_coco_dict(dataset_name)\n\n logger.info(f\"Caching COCO format annotations at '{output_file}' ...\")\n tmp_file = output_file + \".tmp\"\n with PathManager.open(tmp_file, \"w\") as f:\n json.dump(coco_dict, f)\n shutil.move(tmp_file, output_file)" }, { "identifier": "Boxes", "path": "detectron2/structures/boxes.py", "snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n\n Attributes:\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx4 matrix. 
Each row is (x1, y1, x2, y2).\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"Boxes\":\n \"\"\"\n Clone the Boxes.\n\n Returns:\n Boxes\n \"\"\"\n return Boxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return Boxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area\n\n def clip(self, box_size: Tuple[int, int]) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n Args:\n box_size (height, width): The clipping box's size.\n \"\"\"\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor:\n a binary vector which represents whether each box is empty\n (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2] - box[:, 0]\n heights = box[:, 3] - box[:, 1]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a BoolTensor\n\n Returns:\n Boxes: Create a new :class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"Boxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box.\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n inds_inside = (\n (self.tensor[..., 0] >= -boundary_threshold)\n & (self.tensor[..., 1] >= -boundary_threshold)\n & (self.tensor[..., 2] < width + boundary_threshold)\n & (self.tensor[..., 3] < height + boundary_threshold)\n )\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the box with horizontal and vertical scaling factors\n \"\"\"\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y\n\n @classmethod\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\n \"\"\"\n Concatenates a list of Boxes into a single Boxes\n\n Arguments:\n boxes_list (list[Boxes])\n\n Returns:\n Boxes: the concatenated Boxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, Boxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\n # https://github.com/pytorch/pytorch/issues/18627\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (4,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "BoxMode", "path": "detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. 
They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr" }, { "identifier": "pairwise_iou", "path": "detectron2/structures/boxes.py", "snippet": "def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M, compute the IoU\n (intersection over union) between **all** N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax).\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. 
Contains N & M boxes, respectively.\n\n Returns:\n Tensor: IoU, sized [N,M].\n \"\"\"\n area1 = boxes1.area() # [N]\n area2 = boxes2.area() # [M]\n inter = pairwise_intersection(boxes1, boxes2)\n\n # handle empty boxes\n iou = torch.where(\n inter > 0,\n inter / (area1[:, None] + area2 - inter),\n torch.zeros(1, dtype=inter.dtype, device=inter.device),\n )\n return iou" }, { "identifier": "PathManager", "path": "detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "create_small_table", "path": "detectron2/utils/logger.py", "snippet": "def create_small_table(small_dict):\n \"\"\"\n Create a small table using the keys of small_dict as headers. This is only\n suitable for small dictionaries.\n\n Args:\n small_dict (dict): a result dictionary of only a few items.\n\n Returns:\n str: the table as a string.\n \"\"\"\n keys, values = tuple(zip(*small_dict.items()))\n table = tabulate(\n [values],\n headers=keys,\n tablefmt=\"pipe\",\n floatfmt=\".3f\",\n stralign=\"center\",\n numalign=\"center\",\n )\n return table" }, { "identifier": "DatasetEvaluator", "path": "detectron2/evaluation/evaluator.py", "snippet": "class DatasetEvaluator:\n \"\"\"\n Base class for a dataset evaluator.\n\n The function :func:`inference_on_dataset` runs the model over\n all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs.\n\n This class will accumulate information of the inputs/outputs (by :meth:`process`),\n and produce evaluation results in the end (by :meth:`evaluate`).\n \"\"\"\n\n def reset(self):\n \"\"\"\n Preparation for a new round of evaluation.\n Should be called before starting a round of evaluation.\n \"\"\"\n pass\n\n def process(self, inputs, outputs):\n \"\"\"\n Process the pair of inputs and outputs.\n If they contain batches, the pairs can be consumed one-by-one using `zip`:\n\n .. code-block:: python\n\n for input_, output in zip(inputs, outputs):\n # do evaluation on single input/output pair\n ...\n\n Args:\n inputs (list): the inputs that's used to call the model.\n outputs (list): the return value of `model(inputs)`\n \"\"\"\n pass\n\n def evaluate(self):\n \"\"\"\n Evaluate/summarize the performance, after processing all input/output pairs.\n\n Returns:\n dict:\n A new evaluator class can return a dict of arbitrary format\n as long as the user can process the results.\n In our train_net.py, we expect the following format:\n\n * key: the name of the task (e.g., bbox)\n * value: a dict of {metric name: score}, e.g.: {\"AP50\": 80}\n \"\"\"\n pass" }, { "identifier": "RefCOCOeval", "path": "detectron2/evaluation/refcocoeval.py", "snippet": "class RefCOCOeval:\n # Interface for evaluating detection on the Microsoft COCO dataset.\n #\n # The usage for CocoEval is as follows:\n # cocoGt=..., cocoDt=... 
# load dataset and results\n # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n # for computing overall iou\n self.total_intersection_area = 0\n self.total_union_area = 0\n self.iou_list = []\n if not cocoGt is None:\n self.params.imgIds = sorted(cocoGt.getImgIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['image_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['image_id'], dt['category_id']].append(dt)\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(imgId, catId): computeIoU(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n # evaluateImg = self.evaluateImg\n # maxDet = p.maxDets[-1]\n # self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)\n # for catId in catIds\n # for areaRng in p.areaRng\n # for imgId in p.imgIds\n # ]\n # self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n\n # for computing overall iou\n # there is only one bbox and segm\n if p.iouType == 'bbox':\n g, d = g[0], d[0]\n g_bbox = [g[0], g[1], g[2] + g[0], g[3] + g[1]] # x1y1wh -> x1y1x2y2\n d_bbox = [d[0], d[1], d[2] + d[0], d[3] + d[1]] # x1y1wh -> x1y1x2y2\n g_bbox = torch.tensor(g_bbox).unsqueeze(0)\n d_bbox = torch.tensor(d_bbox).unsqueeze(0)\n iou, intersection, union = compute_bbox_iou(d_bbox, g_bbox)\n elif p.iouType == 'segm':\n g_segm = decode(g[0])\n d_segm = decode(d[0])\n g_segm = torch.tensor(g_segm).unsqueeze(0)\n d_segm = torch.tensor(d_segm).unsqueeze(0)\n iou, intersection, union = compute_mask_iou(d_segm, g_segm)\n else:\n raise Exception('unknown iouType for iou computation')\n iou, intersection, union = iou.item(), intersection.item(), union.item()\n self.total_intersection_area += intersection\n self.total_union_area += union\n self.iou_list.append(iou)\n return ious\n\n\n def evaluateImg(self, imgId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load 
computed ious\n ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = -np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] 
for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def 
_summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()" } ]
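The context above documents detectron2's Boxes, BoxMode, and pairwise_iou utilities. A small self-contained sketch of how they compose; the coordinates are arbitrary example values:

```python
import torch
from detectron2.structures import Boxes, BoxMode, pairwise_iou

# Convert a COCO-style (x, y, w, h) box into the (x1, y1, x2, y2) form Boxes expects.
xywh = [10.0, 10.0, 20.0, 20.0]
xyxy = BoxMode.convert(xywh, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)

pred = Boxes(torch.tensor([xyxy]))
gt = Boxes(torch.tensor([[12.0, 12.0, 28.0, 28.0]]))

iou = pairwise_iou(pred, gt)  # tensor of shape [N, M], here [1, 1]
print(iou)
```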
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pickle
import pycocotools.mask as mask_util
import torch
import detectron2.utils.comm as comm
from collections import OrderedDict
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_json
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
from .evaluator import DatasetEvaluator
from detectron2.evaluation.fast_eval_api import COCOeval_opt
from detectron2.evaluation.refcocoeval import RefCOCOeval
14,821
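The imports for this row pull in COCO and COCOeval from pycocotools, and the RefCOCOeval header comments above spell out the intended call order (evaluate, then accumulate, then summarize). A hedged sketch of that standard workflow with placeholder file names:

```python
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

# Placeholder file names: a COCO-format ground-truth json and a results json
# such as the "coco_instances_results.json" written by the evaluator below.
coco_gt = COCO("instances_val2017.json")
coco_dt = coco_gt.loadRes("coco_instances_results.json")

coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()  # prints the standard AP/AR table
```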
""" num_instance = len(instances) if num_instance == 0: return [] boxes = instances.pred_boxes.tensor.numpy() boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) boxes = boxes.tolist() scores = instances.scores.tolist() classes = instances.pred_classes.tolist() has_mask = instances.has("pred_masks") if has_mask: # use RLE to encode the masks, because they are too large and takes memory # since this evaluator stores outputs of the entire dataset rles = [ mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] for mask in instances.pred_masks ] for rle in rles: # "counts" is an array encoded by mask_util as a byte-stream. Python3's # json writer which always produces strings cannot serialize a bytestream # unless you decode it. Thankfully, utf-8 works out (which is also what # the pycocotools/_mask.pyx does). rle["counts"] = rle["counts"].decode("utf-8") has_keypoints = instances.has("pred_keypoints") if has_keypoints: keypoints = instances.pred_keypoints results = [] for k in range(num_instance): result = { "image_id": img_id, "category_id": classes[k], "bbox": boxes[k], "score": scores[k], } if has_mask: result["segmentation"] = rles[k] if has_keypoints: # In COCO annotations, # keypoints coordinates are pixel indices. # However our predictions are floating point coordinates. # Therefore we subtract 0.5 to be consistent with the annotation format. # This is the inverse of data loading logic in `datasets/coco.py`. keypoints[k][:, :2] -= 0.5 result["keypoints"] = keypoints[k].flatten().tolist() results.append(result) return results # inspired from Detectron: # https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): """ Evaluate detection proposal recall metrics. This function is a much faster alternative to the official COCO API recall evaluation code. However, it produces slightly different results. 
""" # Record max overlap value for each gt box # Return vector of overlap values areas = { "all": 0, "small": 1, "medium": 2, "large": 3, "96-128": 4, "128-256": 5, "256-512": 6, "512-inf": 7, } area_ranges = [ [0 ** 2, 1e5 ** 2], # all [0 ** 2, 32 ** 2], # small [32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) anno = coco_api.loadAnns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno if obj["iscrowd"] == 0 ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes gt_boxes = Boxes(gt_boxes) gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) if len(gt_boxes) == 0 or len(predictions) == 0: continue valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) gt_boxes = gt_boxes[valid_gt_inds] num_pos += len(gt_boxes) if len(gt_boxes) == 0: continue if limit is not None and len(predictions) > limit: predictions = predictions[:limit]
# Copyright (c) Facebook, Inc. and its affiliates. try: except ImportError: COCOeval_opt = COCOeval class COCOEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means the metric cannot be computed (e.g. due to no predictions made). In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, tasks=None, distributed=True, output_dir=None, *, max_dets_per_image=None, use_fast_impl=True, kpt_oks_sigmas=(), allow_cached_coco=True, force_tasks=None, refcoco=False ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and contains all the results in the format they are produced by the model. 2. "coco_instances_results.json" a json file in COCO's result format. max_dets_per_image (int): limit on the maximum number of detections per image. By default in COCO, this limit is to 100, but this can be customized to be greater, as is needed in evaluation metrics AP fixed and AP pool (see https://arxiv.org/pdf/2102.01066.pdf) This doesn't affect keypoint evaluation. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval When empty, it will use the defaults in COCO. Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. allow_cached_coco (bool): Whether to use cached coco json from previous validation runs. You should set this to False if you need to use different validation data. Defaults to True. """ self.dataset_name = dataset_name self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self.force_tasks = force_tasks self.refcoco = refcoco if use_fast_impl and (COCOeval_opt is COCOeval): self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.") use_fast_impl = False self._use_fast_impl = use_fast_impl # COCOeval requires the limit on the number of detections per image (maxDets) to be a list # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the # 3rd element (100) is used as the limit on the number of detections per image when # evaluating AP. 
COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval, # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults. if max_dets_per_image is None: max_dets_per_image = [1, 10, 100] else: max_dets_per_image = [1, 10, max_dets_per_image] self._max_dets_per_image = max_dets_per_image if tasks is not None and isinstance(tasks, CfgNode): kpt_oks_sigmas = ( tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas ) self._logger.warn( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu") self._metadata = MetadataCatalog.get(dataset_name) if not hasattr(self._metadata, "json_file"): if output_dir is None: raise ValueError( "output_dir must be provided to COCOEvaluator " "for datasets not in COCO format." ) self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...") cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") self._metadata.json_file = cache_path convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco) json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._coco_api = COCO(json_file) # Test set json files do not contain annotations (evaluation must be # performed using the COCO evaluation server). self._do_evaluation = "annotations" in self._coco_api.dataset if self._do_evaluation: self._kpt_oks_sigmas = kpt_oks_sigmas def reset(self): self._predictions = [] def process(self, inputs, outputs): """ Args: inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). It is a list of dict. Each dict corresponds to an image and contains keys like "height", "width", "file_name", "image_id". outputs: the outputs of a COCO model. It is a list of dicts with key "instances" that contains :class:`Instances`. """ for input, output in zip(inputs, outputs): prediction = {"image_id": input["image_id"]} if "instances" in output: instances = output["instances"].to(self._cpu_device) prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) if "proposals" in output: prediction["proposals"] = output["proposals"].to(self._cpu_device) if len(prediction) > 1: self._predictions.append(prediction) def evaluate(self, img_ids=None): """ Args: img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset """ if self._distributed: comm.synchronize() predictions = comm.gather(self._predictions, dst=0) predictions = list(itertools.chain(*predictions)) if not comm.is_main_process(): return {} else: predictions = self._predictions if len(predictions) == 0: self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") return {} if self._output_dir: PathManager.mkdirs(self._output_dir) file_path = os.path.join(self._output_dir, "instances_predictions.pth") with PathManager.open(file_path, "wb") as f: torch.save(predictions, f) self._results = OrderedDict() if "proposals" in predictions[0]: self._eval_box_proposals(predictions) if "instances" in predictions[0]: self._eval_predictions(predictions, img_ids=img_ids) # Copy so the caller can do whatever with results return copy.deepcopy(self._results) def _tasks_from_predictions(self, predictions): """ Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions. 
""" tasks = {"bbox"} for pred in predictions: if "segmentation" in pred: tasks.add("segm") if "keypoints" in pred: tasks.add("keypoints") return sorted(tasks) def _eval_predictions(self, predictions, img_ids=None): """ Evaluate predictions. Fill self._results with the metrics of the tasks. """ self._logger.info("Preparing results for COCO format ...") coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) tasks = self._tasks or self._tasks_from_predictions(coco_results) if self.force_tasks is not None: tasks = self.force_tasks # unmap the category ids for COCO if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) num_classes = len(all_contiguous_ids) assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} for result in coco_results: category_id = result["category_id"] assert category_id < num_classes, ( f"A prediction has class={category_id}, " f"but the dataset only has {num_classes} classes and " f"predicted class id should be in [0, {num_classes - 1}]." ) result["category_id"] = reverse_id_mapping[category_id] if self._output_dir: if "refcoco" in self.dataset_name: file_path = os.path.join(self._output_dir, "{}_instances_results.json".format(self.dataset_name)) else: file_path = os.path.join(self._output_dir, "coco_instances_results.json") self._logger.info("Saving results to {}".format(file_path)) with PathManager.open(file_path, "w") as f: f.write(json.dumps(coco_results)) f.flush() if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info( "Evaluating predictions with {} COCO API...".format( "unofficial" if self._use_fast_impl else "official" ) ) for task in sorted(tasks): assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!" coco_eval = ( _evaluate_predictions_on_coco( self._coco_api, coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas, use_fast_impl=self._use_fast_impl, img_ids=img_ids, max_dets_per_image=self._max_dets_per_image, refcoco=self.refcoco ) if len(coco_results) > 0 else None # cocoapi does not handle empty results very well ) if not self.refcoco: res = self._derive_coco_results( coco_eval, task, class_names=self._metadata.get("thing_classes") ) self._results[task] = res else: res = self._derive_refcoco_results(coco_eval, task) self._results[task] = res def _eval_box_proposals(self, predictions): """ Evaluate the box proposals in predictions. Fill self._results with the metrics for "box_proposals" task. """ if self._output_dir: # Saving generated box proposals to file. # Predicted box_proposals are in XYXY_ABS mode. 
bbox_mode = BoxMode.XYXY_ABS.value ids, boxes, objectness_logits = [], [], [] for prediction in predictions: ids.append(prediction["image_id"]) boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) proposal_data = { "boxes": boxes, "objectness_logits": objectness_logits, "ids": ids, "bbox_mode": bbox_mode, } with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: pickle.dump(proposal_data, f) if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info("Evaluating bbox proposals ...") res = {} areas = {"all": "", "small": "s", "medium": "m", "large": "l"} for limit in [100, 1000]: for area, suffix in areas.items(): stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit) key = "AR{}@{:d}".format(suffix, limit) res[key] = float(stats["ar"].item() * 100) self._logger.info("Proposal metrics: \n" + create_small_table(res)) self._results["box_proposals"] = res def _derive_coco_results(self, coco_eval, iou_type, class_names=None): """ Derive the desired score numbers from summarized COCOeval. Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ metrics = { "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"], "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"], "keypoints": ["AP", "AP50", "AP75", "APm", "APl"], }[iou_type] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } self._logger.info( "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) ) if not np.isfinite(sum(results.values())): self._logger.info("Some metrics cannot be computed and is shown as NaN.") if class_names is None or len(class_names) <= 1: return results # Compute per-category AP # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa precisions = coco_eval.eval["precision"] # precision has dims (iou, recall, cls, area range, max dets) assert len(class_names) == precisions.shape[2] results_per_category = [] for idx, name in enumerate(class_names): # area range index 0: all area ranges # max dets index -1: typically 100 per image precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] ap = np.mean(precision) if precision.size else float("nan") results_per_category.append(("{}".format(name), float(ap * 100))) # tabulate it N_COLS = min(6, len(results_per_category) * 2) results_flatten = list(itertools.chain(*results_per_category)) results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) table = tabulate( results_2d, tablefmt="pipe", floatfmt=".3f", headers=["category", "AP"] * (N_COLS // 2), numalign="left", ) self._logger.info("Per-category {} AP: \n".format(iou_type) + table) results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def _derive_refcoco_results(self, coco_eval, iou_type): """ Derive the desired score numbers from summarized COCOeval. 
Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ metrics = {"bbox": ["[email protected]", "[email protected]", "[email protected]", "[email protected]", "[email protected]", "oIoU", "mIoU"], "segm": ["[email protected]", "[email protected]", "[email protected]", "[email protected]", "[email protected]", "oIoU", "mIoU"] }[iou_type] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float("nan") for idx, metric in enumerate(metrics) } ious = np.array([v for (k, v) in coco_eval.ious.items()]) total_intersection_area = coco_eval.total_intersection_area total_union_area = coco_eval.total_union_area iou_list = coco_eval.iou_list # compute metrics results["[email protected]"] = np.sum(ious > 0.5) / len(ious) * 100 results["[email protected]"] = np.sum(ious > 0.6) / len(ious) * 100 results["[email protected]"] = np.sum(ious > 0.7) / len(ious) * 100 results["[email protected]"] = np.sum(ious > 0.8) / len(ious) * 100 results["[email protected]"] = np.sum(ious > 0.9) / len(ious) * 100 results["oIoU"] = total_intersection_area / total_union_area * 100 results["mIoU"] = np.mean(ious) * 100 # if iou_type == "bbox": # results["[email protected]"] = np.sum(ious > 0.5) / len(ious) * 100 # elif iou_type == "segm": # results["mIoU"] = np.mean(ious) * 100 # else: # raise ValueError("Unsupported iou_type!") self._logger.info( "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) ) # results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def instances_to_coco_json(instances, img_id): """ Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): img_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. """ num_instance = len(instances) if num_instance == 0: return [] boxes = instances.pred_boxes.tensor.numpy() boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) boxes = boxes.tolist() scores = instances.scores.tolist() classes = instances.pred_classes.tolist() has_mask = instances.has("pred_masks") if has_mask: # use RLE to encode the masks, because they are too large and takes memory # since this evaluator stores outputs of the entire dataset rles = [ mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] for mask in instances.pred_masks ] for rle in rles: # "counts" is an array encoded by mask_util as a byte-stream. Python3's # json writer which always produces strings cannot serialize a bytestream # unless you decode it. Thankfully, utf-8 works out (which is also what # the pycocotools/_mask.pyx does). rle["counts"] = rle["counts"].decode("utf-8") has_keypoints = instances.has("pred_keypoints") if has_keypoints: keypoints = instances.pred_keypoints results = [] for k in range(num_instance): result = { "image_id": img_id, "category_id": classes[k], "bbox": boxes[k], "score": scores[k], } if has_mask: result["segmentation"] = rles[k] if has_keypoints: # In COCO annotations, # keypoints coordinates are pixel indices. # However our predictions are floating point coordinates. # Therefore we subtract 0.5 to be consistent with the annotation format. # This is the inverse of data loading logic in `datasets/coco.py`. 
keypoints[k][:, :2] -= 0.5 result["keypoints"] = keypoints[k].flatten().tolist() results.append(result) return results # inspired from Detectron: # https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): """ Evaluate detection proposal recall metrics. This function is a much faster alternative to the official COCO API recall evaluation code. However, it produces slightly different results. """ # Record max overlap value for each gt box # Return vector of overlap values areas = { "all": 0, "small": 1, "medium": 2, "large": 3, "96-128": 4, "128-256": 5, "256-512": 6, "512-inf": 7, } area_ranges = [ [0 ** 2, 1e5 ** 2], # all [0 ** 2, 32 ** 2], # small [32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) anno = coco_api.loadAnns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno if obj["iscrowd"] == 0 ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes gt_boxes = Boxes(gt_boxes) gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) if len(gt_boxes) == 0 or len(predictions) == 0: continue valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) gt_boxes = gt_boxes[valid_gt_inds] num_pos += len(gt_boxes) if len(gt_boxes) == 0: continue if limit is not None and len(predictions) > limit: predictions = predictions[:limit]
overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
5
2023-12-22 13:31:33+00:00
24k
xhuangcv/humannorm
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n # improve the resolution of DMTET at these steps\n progressive_resolution_steps: Optional[int] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if 
self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n self.cached_sdf = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # adjust the position of mesh\n if \"full_body\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.3\n elif \"half_body\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.1\n elif \"head_only\" in mesh_path:\n mesh.vertices[:,2] = mesh.vertices[:,2] + 0.15\n elif \"t-pose\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.4\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = 
np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(2000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((40000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n\n sdf_loss: Optional[Float[Tensor, \"*N 1\"]] = None\n if self.cfg.use_sdf_loss and self.cached_sdf is not None:\n selected_points_idx = torch.LongTensor(random.sample(range(points_unscaled.shape[0]), 100000))\n gt_sdf = torch.from_numpy(-self.cached_sdf(points_unscaled[selected_points_idx].cpu().numpy())).to(\n points_unscaled\n )[..., None]\n sdf_loss = F.mse_loss(gt_sdf, sdf[selected_points_idx], reduction='sum')\n return sdf, deformation, sdf_loss\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n\n if global_step >= 
(self.cfg.start_sdf_loss_step + 1) and self.cached_sdf is None:\n\n from pysdf import SDF\n import trimesh\n\n mesh_v_pos = np.load('.threestudio_cache/mesh_v_pos.npy')\n mesh_t_pos_idx = np.load('.threestudio_cache/mesh_t_pos_idx.npy')\n cached_mesh = trimesh.Trimesh(\n vertices=mesh_v_pos,\n faces=mesh_t_pos_idx,\n )\n self.cached_sdf = SDF(cached_mesh.vertices, cached_mesh.faces)\n\n if self.cfg.progressive_resolution_steps is not None:\n if global_step >= self.cfg.progressive_resolution_steps[0] and self.cfg.isosurface_resolution < 256:\n self.cfg.isosurface_resolution = 256\n self.isosurface_helper = None\n self._initilize_isosurface_helper()\n if global_step >= self.cfg.progressive_resolution_steps[1] and self.cfg.isosurface_resolution < 512:\n self.cfg.isosurface_resolution = 512\n self.isosurface_helper = None\n self._initilize_isosurface_helper()\n\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n 
self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 
3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n 
[4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = 
edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n 
components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # 
Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n \n setattr(co, 'max_cost', 2.0)\n setattr(po, 'resolution', 4096)\n \n atlas.generate(co, po, verbose=True)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = 
ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
from pysdf import SDF
from tqdm import tqdm
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
15,536
if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") scene = trimesh.load(mesh_path) if isinstance(scene, trimesh.Trimesh): mesh = scene elif isinstance(scene, trimesh.scene.Scene): mesh = trimesh.Trimesh() for obj in scene.geometry.values(): mesh = trimesh.util.concatenate([mesh, obj]) else: raise ValueError(f"Unknown mesh type at {mesh_path}.") # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) # Initialize SDF to a given shape when no weights are provided or force_shape_init is True optim = torch.optim.Adam(self.parameters(), lr=1e-3) for _ in tqdm( range(1000), desc=f"Initializing SDF to a(n) {self.cfg.shape_init}:", disable=get_rank() != 0, ): points_rand = ( torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0 ) sdf_gt = get_gt_sdf(points_rand) sdf_pred = self.forward_sdf(points_rand) loss = F.mse_loss(sdf_pred, sdf_gt) optim.zero_grad() loss.backward() optim.step() # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation)
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return if self.cfg.sdf_bias != 0.0: threestudio.warn( "shape_init and sdf_bias are both specified, which may lead to unexpected results." 
) get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") scene = trimesh.load(mesh_path) if isinstance(scene, trimesh.Trimesh): mesh = scene elif isinstance(scene, trimesh.scene.Scene): mesh = trimesh.Trimesh() for obj in scene.geometry.values(): mesh = trimesh.util.concatenate([mesh, obj]) else: raise ValueError(f"Unknown mesh type at {mesh_path}.") # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." 
) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) # Initialize SDF to a given shape when no weights are provided or force_shape_init is True optim = torch.optim.Adam(self.parameters(), lr=1e-3) for _ in tqdm( range(1000), desc=f"Initializing SDF to a(n) {self.cfg.shape_init}:", disable=get_rank() != 0, ): points_rand = ( torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0 ) sdf_gt = get_gt_sdf(points_rand) sdf_pred = self.forward_sdf(points_rand) loss = F.mse_loss(sdf_pred, sdf_gt) optim.zero_grad() loss.backward() optim.step() # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation)
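The "mesh:" branch of the shape initialization above builds a change of basis from the configured shape_init_mesh_up / shape_init_mesh_front directions: the front, cross-product, and up vectors become the columns of std2mesh, and its inverse mesh2std is applied to the vertices. The following standalone numpy check mirrors that construction; the concrete choice of "+y" up and "+x" front is an assumption for illustration only, not taken from the record.

import numpy as np

# hypothetical configuration, chosen only to exercise the construction
dir2vec = {
    "+x": np.array([1, 0, 0]),
    "+y": np.array([0, 1, 0]),
    "+z": np.array([0, 0, 1]),
}
up, front = "+y", "+x"

z_, x_ = dir2vec[up], dir2vec[front]  # same naming as in the snippet
y_ = np.cross(z_, x_)

# columns of std2mesh are the configured front / cross / up directions
std2mesh = np.stack([x_, y_, z_], axis=0).T
mesh2std = np.linalg.inv(std2mesh)

# mesh2std sends the configured up/front directions to +z and +x respectively
assert np.allclose(mesh2std @ dir2vec[up], np.array([0, 0, 1]))
assert np.allclose(mesh2std @ dir2vec[front], np.array([1, 0, 0]))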
mesh.v_pos = scale_tensor(
9
2023-12-23 12:37:48+00:00
24k
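The record's next_line, "mesh.v_pos = scale_tensor(", begins the step of isosurface() where the extracted vertices (presumably in the marching-tetrahedra helper's own normalized coordinate range) get mapped into the registered isosurface_bbox buffer. The repository's actual scale_tensor signature is not reproduced here; the sketch below is only a minimal stand-in for the affine range-to-range rescale involved, and the (0, 1) source range and the name rescale are assumptions for illustration.

import torch

def rescale(x: torch.Tensor, src: tuple, dst: torch.Tensor) -> torch.Tensor:
    # map x from the scalar range [src[0], src[1]] into the per-axis
    # range given by dst, a (2, 3) tensor holding the (min, max) corners
    lo, hi = src
    t = (x - lo) / (hi - lo)  # normalize to [0, 1]
    return dst[0] + t * (dst[1] - dst[0])

# hypothetical tet-grid vertices in [0, 1]^3 and a bbox like isosurface_bbox
v_pos = torch.rand(100, 3)
bbox = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])  # (2, 3) min/max corners

v_world = rescale(v_pos, (0.0, 1.0), bbox)
assert v_world.min() >= -1.0 and v_world.max() <= 1.0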
dakpinaroglu/Frame2seq
frame2seq/openfold/model/structure_module.py
[ { "identifier": "Linear", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in the code.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n out_dim: int,\n bias: bool = True,\n init: str = \"default\",\n init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]] = None,\n ):\n \"\"\"\n Args:\n in_dim:\n The final dimension of inputs to the layer\n out_dim:\n The final dimension of layer outputs\n bias:\n Whether to learn an additive bias. True by default\n init:\n The initializer to use. Choose from:\n\n \"default\": LeCun fan-in truncated normal initialization\n \"relu\": He initialization w/ truncated normal distribution\n \"glorot\": Fan-average Glorot uniform initialization\n \"gating\": Weights=0, Bias=1\n \"normal\": Normal initialization with std=1/sqrt(fan_in)\n \"final\": Weights=0, Bias=0\n\n Overridden by init_fn if the latter is not None.\n init_fn:\n A custom initializer taking weight and bias as inputs.\n Overrides init if not None.\n \"\"\"\n super(Linear, self).__init__(in_dim, out_dim, bias=bias)\n\n if bias:\n with torch.no_grad():\n self.bias.fill_(0)\n\n with torch.no_grad():\n if init_fn is not None:\n init_fn(self.weight, self.bias)\n else:\n if init == \"default\":\n lecun_normal_init_(self.weight)\n elif init == \"relu\":\n he_normal_init_(self.weight)\n elif init == \"glorot\":\n glorot_uniform_init_(self.weight)\n elif init == \"gating\":\n gating_init_(self.weight)\n if bias:\n self.bias.fill_(1.0)\n elif init == \"normal\":\n normal_init_(self.weight)\n elif init == \"final\":\n final_init_(self.weight)\n else:\n raise ValueError(\"Invalid init string.\")" }, { "identifier": "LayerNorm", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class LayerNorm(nn.Module):\n def __init__(self, c_in, eps=1e-5):\n super(LayerNorm, self).__init__()\n \n self.c_in = (c_in,)\n self.eps = eps\n\n self.weight = nn.Parameter(torch.ones(c_in))\n self.bias = nn.Parameter(torch.zeros(c_in))\n\n def forward(self, x): \n d = x.dtype\n # deepspeed_is_initialized = (\n # deepspeed_is_installed and \n # deepspeed.utils.is_initialized()\n # )\n # if(d is torch.bfloat16 and not deepspeed_is_initialized):\n # with torch.cuda.amp.autocast(enabled=False):\n # out = nn.functional.layer_norm(\n # x, \n # self.c_in, \n # self.weight.to(dtype=d), \n # self.bias.to(dtype=d), \n # self.eps\n # )\n # else:\n out = nn.functional.layer_norm(\n x,\n self.c_in,\n self.weight,\n self.bias,\n self.eps,\n )\n\n return out" }, { "identifier": "ipa_point_weights_init_", "path": "frame2seq/openfold/model/primitives.py", "snippet": "def ipa_point_weights_init_(weights):\n with torch.no_grad():\n softplus_inverse_1 = 0.541324854612918\n weights.fill_(softplus_inverse_1)" }, { "identifier": "restype_rigid_group_default_frame", "path": "frame2seq/openfold/np/residue_constants.py", "snippet": "def load_stereo_chemical_props() -> Tuple[\n def make_bond_key(atom1_name, atom2_name):\ndef sequence_to_onehot(\n sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False\n) -> np.ndarray:\ndef _make_standard_atom_mask() -> np.ndarray:\ndef chi_angle_atom(atom_index: int) -> np.ndarray:\ndef _make_rigid_transformation_4x4(ex, ey, translation):\ndef _make_rigid_group_constants():\ndef make_atom14_dists_bounds(\n overlap_tolerance=1.5, 
bond_length_tolerance_factor=15\n):\ndef _make_atom14_ambiguity_feats():\ndef aatype_to_str_sequence(aatype):\nHHBLITS_AA_TO_ID = {\n \"A\": 0,\n \"B\": 2,\n \"C\": 1,\n \"D\": 2,\n \"E\": 3,\n \"F\": 4,\n \"G\": 5,\n \"H\": 6,\n \"I\": 7,\n \"J\": 20,\n \"K\": 8,\n \"L\": 9,\n \"M\": 10,\n \"N\": 11,\n \"O\": 20,\n \"P\": 12,\n \"Q\": 13,\n \"R\": 14,\n \"S\": 15,\n \"T\": 16,\n \"U\": 1,\n \"V\": 17,\n \"W\": 18,\n \"X\": 20,\n \"Y\": 19,\n \"Z\": 3,\n \"-\": 21,\n}\nID_TO_HHBLITS_AA = {\n 0: \"A\",\n 1: \"C\", # Also U.\n 2: \"D\", # Also B.\n 3: \"E\", # Also Z.\n 4: \"F\",\n 5: \"G\",\n 6: \"H\",\n 7: \"I\",\n 8: \"K\",\n 9: \"L\",\n 10: \"M\",\n 11: \"N\",\n 12: \"P\",\n 13: \"Q\",\n 14: \"R\",\n 15: \"S\",\n 16: \"T\",\n 17: \"V\",\n 18: \"W\",\n 19: \"Y\",\n 20: \"X\", # Includes J and O.\n 21: \"-\",\n}\nMAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(\n restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i])\n for i in range(len(restypes_with_x_and_gap))\n)\nSTANDARD_ATOM_MASK = _make_standard_atom_mask()" }, { "identifier": "frames_and_literature_positions_to_atom14_pos", "path": "frame2seq/openfold/utils/feats.py", "snippet": "def frames_and_literature_positions_to_atom14_pos(\n r: Rigid,\n aatype: torch.Tensor,\n default_frames,\n group_idx,\n atom_mask,\n lit_positions,\n):\n # [*, N, 14, 4, 4]\n default_4x4 = default_frames[aatype, ...]\n\n # [*, N, 14]\n group_mask = group_idx[aatype, ...]\n\n # [*, N, 14, 8]\n group_mask = nn.functional.one_hot(\n group_mask,\n num_classes=default_frames.shape[-3],\n )\n\n # [*, N, 14, 8]\n t_atoms_to_global = r[..., None, :] * group_mask\n\n # [*, N, 14]\n t_atoms_to_global = t_atoms_to_global.map_tensor_fn(\n lambda x: torch.sum(x, dim=-1)\n )\n\n # [*, N, 14, 1]\n atom_mask = atom_mask[aatype, ...].unsqueeze(-1)\n\n # [*, N, 14, 3]\n lit_positions = lit_positions[aatype, ...]\n pred_positions = t_atoms_to_global.apply(lit_positions)\n pred_positions = pred_positions * atom_mask\n\n return pred_positions" }, { "identifier": "torsion_angles_to_frames", "path": "frame2seq/openfold/utils/feats.py", "snippet": "def torsion_angles_to_frames(\n r: Rigid,\n alpha: torch.Tensor,\n aatype: torch.Tensor,\n rrgdf: torch.Tensor,\n):\n # [*, N, 8, 4, 4]\n default_4x4 = rrgdf[aatype, ...]\n\n # [*, N, 8] transformations, i.e.\n # One [*, N, 8, 3, 3] rotation matrix and\n # One [*, N, 8, 3] translation matrix\n default_r = r.from_tensor_4x4(default_4x4)\n\n bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2))\n bb_rot[..., 1] = 1\n\n # [*, N, 8, 2]\n alpha = torch.cat(\n [bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2\n )\n\n # [*, N, 8, 3, 3]\n # Produces rotation matrices of the form:\n # [\n # [1, 0 , 0 ],\n # [0, a_2,-a_1],\n # [0, a_1, a_2]\n # ]\n # This follows the original code rather than the supplement, which uses\n # different indices.\n\n all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape)\n all_rots[..., 0, 0] = 1\n all_rots[..., 1, 1] = alpha[..., 1]\n all_rots[..., 1, 2] = -alpha[..., 0]\n all_rots[..., 2, 1:] = alpha\n\n all_rots = Rigid(Rotation(rot_mats=all_rots), None)\n\n all_frames = default_r.compose(all_rots)\n\n chi2_frame_to_frame = all_frames[..., 5]\n chi3_frame_to_frame = all_frames[..., 6]\n chi4_frame_to_frame = all_frames[..., 7]\n\n chi1_frame_to_bb = all_frames[..., 4]\n chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame)\n chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame)\n chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame)\n\n all_frames_to_bb = 
Rigid.cat(\n [\n all_frames[..., :5],\n chi2_frame_to_bb.unsqueeze(-1),\n chi3_frame_to_bb.unsqueeze(-1),\n chi4_frame_to_bb.unsqueeze(-1),\n ],\n dim=-1,\n )\n\n all_frames_to_global = r[..., None].compose(all_frames_to_bb)\n\n return all_frames_to_global" }, { "identifier": "is_fp16_enabled", "path": "frame2seq/openfold/utils/precision_utils.py", "snippet": "def is_fp16_enabled():\n # Autocast world\n try:\n fp16_enabled = torch.get_autocast_gpu_dtype() == torch.float16\n fp16_enabled = fp16_enabled and torch.is_autocast_enabled()\n except AttributeError:\n fp16_enabled = False\n\n return fp16_enabled" }, { "identifier": "Rotation", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rotation:\n \"\"\"\n A 3D rotation. Depending on how the object is initialized, the\n rotation is represented by either a rotation matrix or a\n quaternion, though both formats are made available by helper functions.\n To simplify gradient computation, the underlying format of the\n rotation cannot be changed in-place. Like Rigid, the class is designed\n to mimic the behavior of a torch Tensor, almost as if each Rotation\n object were a tensor of rotations, in one format or another.\n \"\"\"\n def __init__(self,\n rot_mats: Optional[torch.Tensor] = None,\n quats: Optional[torch.Tensor] = None,\n normalize_quats: bool = True,\n ):\n \"\"\"\n Args:\n rot_mats:\n A [*, 3, 3] rotation matrix tensor. Mutually exclusive with\n quats\n quats:\n A [*, 4] quaternion. Mutually exclusive with rot_mats. If\n normalize_quats is not True, must be a unit quaternion\n normalize_quats:\n If quats is specified, whether to normalize quats\n \"\"\"\n if((rot_mats is None and quats is None) or \n (rot_mats is not None and quats is not None)):\n raise ValueError(\"Exactly one input argument must be specified\")\n\n if((rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or \n (quats is not None and quats.shape[-1] != 4)):\n raise ValueError(\n \"Incorrectly shaped rotation matrix or quaternion\"\n )\n\n # Force full-precision\n if(quats is not None):\n quats = quats.to(dtype=torch.float32)\n if(rot_mats is not None):\n rot_mats = rot_mats.to(dtype=torch.float32)\n\n if(quats is not None and normalize_quats):\n quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)\n\n self._rot_mats = rot_mats\n self._quats = quats\n\n @staticmethod\n def identity(\n shape,\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rotation:\n \"\"\"\n Returns an identity Rotation.\n\n Args:\n shape:\n The \"shape\" of the resulting Rotation object. See documentation\n for the shape property\n dtype:\n The torch dtype for the rotation\n device:\n The torch device for the new rotation\n requires_grad:\n Whether the underlying tensors in the new rotation object\n should require gradient computation\n fmt:\n One of \"quat\" or \"rot_mat\". 
Determines the underlying format\n of the new object's rotation \n Returns:\n A new identity rotation\n \"\"\"\n if(fmt == \"rot_mat\"):\n rot_mats = identity_rot_mats(\n shape, dtype, device, requires_grad,\n )\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(fmt == \"quat\"):\n quats = identity_quats(shape, dtype, device, requires_grad)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(f\"Invalid format: f{fmt}\")\n\n # Magic methods\n\n def __getitem__(self, index: Any) -> Rotation:\n \"\"\"\n Allows torch-style indexing over the virtual shape of the rotation\n object. See documentation for the shape property.\n\n Args:\n index:\n A torch index. E.g. (1, 3, 2), or (slice(None,))\n Returns:\n The indexed rotation\n \"\"\"\n if type(index) != tuple:\n index = (index,)\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats[index + (slice(None), slice(None))]\n return Rotation(rot_mats=rot_mats)\n elif(self._quats is not None):\n quats = self._quats[index + (slice(None),)]\n return Rotation(quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Pointwise left multiplication of the rotation with a tensor. Can be\n used to e.g. mask the Rotation.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats * right[..., None, None]\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats * right[..., None]\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Reverse pointwise multiplication of the rotation with a tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n \n # Properties\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the virtual shape of the rotation object. This shape is\n defined as the batch dimensions of the underlying rotation matrix\n or quaternion. 
If the Rotation was initialized with a [10, 3, 3]\n rotation matrix tensor, for example, the resulting shape would be\n [10].\n \n Returns:\n The virtual shape of the rotation object\n \"\"\"\n s = None\n if(self._quats is not None):\n s = self._quats.shape[:-1]\n else:\n s = self._rot_mats.shape[:-2]\n\n return s\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Returns the dtype of the underlying rotation.\n\n Returns:\n The dtype of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.dtype\n elif(self._quats is not None):\n return self._quats.dtype\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n The device of the underlying rotation\n\n Returns:\n The device of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.device\n elif(self._quats is not None):\n return self._quats.device\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def requires_grad(self) -> bool:\n \"\"\"\n Returns the requires_grad property of the underlying rotation\n\n Returns:\n The requires_grad property of the underlying tensor\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.requires_grad\n elif(self._quats is not None):\n return self._quats.requires_grad\n else:\n raise ValueError(\"Both rotations are None\")\n\n def get_rot_mats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a rotation matrix tensor.\n\n Returns:\n The rotation as a rotation matrix tensor\n \"\"\"\n rot_mats = self._rot_mats\n if(rot_mats is None):\n if(self._quats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n rot_mats = quat_to_rot(self._quats)\n\n return rot_mats \n\n def get_quats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a quaternion tensor.\n\n Depending on whether the Rotation was initialized with a\n quaternion, this function may call torch.linalg.eigh.\n\n Returns:\n The rotation as a quaternion tensor.\n \"\"\"\n quats = self._quats\n if(quats is None):\n if(self._rot_mats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n quats = rot_to_quat(self._rot_mats)\n\n return quats\n\n def get_cur_rot(self) -> torch.Tensor:\n \"\"\"\n Return the underlying rotation in its current form\n\n Returns:\n The stored rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats\n elif(self._quats is not None):\n return self._quats\n else:\n raise ValueError(\"Both rotations are None\")\n\n # Rotation functions\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor, \n normalize_quats: bool = True\n ) -> Rotation:\n \"\"\"\n Returns a new quaternion Rotation after updating the current\n object's underlying rotation with a quaternion update, formatted\n as a [*, 3] tensor whose final three columns represent x, y, z such \n that (1, x, y, z) is the desired (not necessarily unit) quaternion\n update.\n\n Args:\n q_update_vec:\n A [*, 3] quaternion update tensor\n normalize_quats:\n Whether to normalize the output quaternion\n Returns:\n An updated Rotation\n \"\"\"\n quats = self.get_quats()\n new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)\n return Rotation(\n rot_mats=None, \n quats=new_quats, \n normalize_quats=normalize_quats,\n )\n\n def compose_r(self, r: Rotation) -> Rotation:\n \"\"\"\n Compose the rotation matrices of the current Rotation object with\n those of another.\n\n Args:\n r:\n An update rotation object\n Returns:\n An 
updated rotation object\n \"\"\"\n r1 = self.get_rot_mats()\n r2 = r.get_rot_mats()\n new_rot_mats = rot_matmul(r1, r2)\n return Rotation(rot_mats=new_rot_mats, quats=None)\n\n def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:\n \"\"\"\n Compose the quaternions of the current Rotation object with those\n of another.\n\n Depending on whether either Rotation was initialized with\n quaternions, this function may call torch.linalg.eigh.\n\n Args:\n r:\n An update rotation object\n Returns:\n An updated rotation object\n \"\"\"\n q1 = self.get_quats()\n q2 = r.get_quats()\n new_quats = quat_multiply(q1, q2)\n return Rotation(\n rot_mats=None, quats=new_quats, normalize_quats=normalize_quats\n )\n\n def apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Apply the current Rotation as a rotation matrix to a set of 3D\n coordinates.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n return rot_vec_mul(rot_mats, pts)\n\n def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n The inverse of the apply() method.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] inverse-rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n inv_rot_mats = invert_rot_mat(rot_mats) \n return rot_vec_mul(inv_rot_mats, pts)\n\n def invert(self) -> Rotation:\n \"\"\"\n Returns the inverse of the current Rotation.\n\n Returns:\n The inverse of the current Rotation\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=invert_rot_mat(self._rot_mats), \n quats=None\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None,\n quats=invert_quat(self._quats),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n # \"Tensor\" stuff\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. The dimension is relative to the\n shape of the Rotation object.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed Rotation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n @staticmethod\n def cat(\n rs: Sequence[Rotation], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates rotations along one of the batch dimensions. Analogous\n to torch.cat().\n\n Note that the output of this operation is always a rotation matrix,\n regardless of the format of input rotations.\n\n Args:\n rs: \n A list of rotation objects\n dim: \n The dimension along which the rotations should be \n concatenated\n Returns:\n A concatenated Rotation object in rotation matrix format\n \"\"\"\n rot_mats = [r.get_rot_mats() for r in rs]\n rot_mats = torch.cat(rot_mats, dim=dim if dim >= 0 else dim - 2)\n\n return Rotation(rot_mats=rot_mats, quats=None) \n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rotation:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying rotation tensors,\n mapping over the rotation dimension(s). Can be used e.g. 
to sum out\n a one-hot batch dimension.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rotation \n Returns:\n The transformed Rotation object\n \"\"\" \n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))\n rot_mats = torch.stack(\n list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1\n )\n rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = torch.stack(\n list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1\n )\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n \n def cuda(self) -> Rotation:\n \"\"\"\n Analogous to the cuda() method of torch Tensors\n\n Returns:\n A copy of the Rotation in CUDA memory\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.cuda(),\n normalize_quats=False\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def to(self, \n device: Optional[torch.device], \n dtype: Optional[torch.dtype]\n ) -> Rotation:\n \"\"\"\n Analogous to the to() method of torch Tensors\n\n Args:\n device:\n A torch device\n dtype:\n A torch dtype\n Returns:\n A copy of the Rotation using the new device and dtype\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=self._rot_mats.to(device=device, dtype=dtype), \n quats=None,\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.to(device=device, dtype=dtype),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def detach(self) -> Rotation:\n \"\"\"\n Returns a copy of the Rotation whose underlying Tensor has been\n detached from its torch graph.\n\n Returns:\n A copy of the Rotation whose underlying Tensor has been detached\n from its torch graph\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.detach(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.detach(), \n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")" }, { "identifier": "Rigid", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. 
from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. 
mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = 
torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. 
The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. 
After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())" }, { "identifier": "dict_multimap", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def dict_multimap(fn, dicts):\n first = dicts[0]\n new_dict = {}\n for k, v in first.items():\n all_v = [d[k] for d in dicts]\n if type(v) is dict:\n new_dict[k] = dict_multimap(fn, all_v)\n else:\n new_dict[k] = fn(all_v)\n\n return new_dict" }, { "identifier": "permute_final_dims", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def permute_final_dims(tensor: torch.Tensor, inds: List[int]):\n zero_index = -1 * len(inds)\n first_inds = list(range(len(tensor.shape[:zero_index])))\n return tensor.permute(first_inds + [zero_index + i for i in inds])" }, { "identifier": "flatten_final_dims", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def flatten_final_dims(t: torch.Tensor, no_dims: int):\n return t.reshape(t.shape[:-no_dims] + (-1,))" } ]
from functools import reduce
from operator import mul
from typing import Optional, Tuple, Sequence
from frame2seq.openfold.model.primitives import Linear, LayerNorm, ipa_point_weights_init_
from frame2seq.openfold.np.residue_constants import (
    restype_rigid_group_default_frame,
    restype_atom14_to_rigid_group,
    restype_atom14_mask,
    restype_atom14_rigid_group_positions,
)
from frame2seq.openfold.utils.feats import (
    frames_and_literature_positions_to_atom14_pos,
    torsion_angles_to_frames,
)
from frame2seq.openfold.utils.precision_utils import is_fp16_enabled
from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid
from frame2seq.openfold.utils.tensor_utils import (
    dict_multimap,
    permute_final_dims,
    flatten_final_dims,
)
import importlib
import math
import sys
import torch
import torch.nn as nn
14,744
# [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # This is kind of clunky, but it's how the original does it # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) ########################## # Compute attention scores ########################## # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): assert(sys.getrefcount(z[0]) == 2) z[0] = z[0].cpu() # [*, H, N_res, N_res] if(is_fp16_enabled()): with torch.cuda.amp.autocast(enabled=False): a = torch.matmul( permute_final_dims(q.float(), (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k.float(), (1, 2, 0)), # [*, H, C_hidden, N_res] ) else: a = torch.matmul( permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res] ) a *= math.sqrt(1.0 / (3 * self.c_hidden)) a += (math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))) # [*, N_res, N_res, H, P_q, 3] pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5) if(inplace_safe): pt_att *= pt_att else: pt_att = pt_att ** 2 # [*, N_res, N_res, H, P_q] pt_att = sum(torch.unbind(pt_att, dim=-1)) head_weights = self.softplus(self.head_weights).view( *((1,) * len(pt_att.shape[:-2]) + (-1, 1)) ) head_weights = head_weights * math.sqrt( 1.0 / (3 * (self.no_qk_points * 9.0 / 2)) ) if(inplace_safe): pt_att *= head_weights else: pt_att = pt_att * head_weights # [*, N_res, N_res, H] pt_att = torch.sum(pt_att, dim=-1) * (-0.5) # [*, N_res, N_res] square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2) square_mask = self.inf * (square_mask - 1) """ Frame2seq implementation of IPA regularization via attention dropout """ if attn_drop_rate > 0.0: random_square_mask = torch.rand(square_mask.shape, device=square_mask.device) random_square_mask = self.inf * -1 * (random_square_mask < attn_drop_rate) square_mask += random_square_mask # [*, H, N_res, N_res] pt_att = permute_final_dims(pt_att, (2, 0, 1)) if(inplace_safe): a += pt_att del pt_att a += square_mask.unsqueeze(-3) # in-place softmax attn_core_inplace_cuda.forward_( a, reduce(mul, a.shape[:-1]), a.shape[-1], ) else: a = a + pt_att a = a + square_mask.unsqueeze(-3) a = self.softmax(a) ################ # Compute output ################ # [*, N_res, H, C_hidden] o = torch.matmul( a, v.transpose(-2, -3).to(dtype=a.dtype) ).transpose(-2, -3) # [*, N_res, H * C_hidden]
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. attn_core_inplace_cuda = False class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden, init="relu") self.linear_2 = Linear(self.c_hidden, self.c_hidden, init="final") self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ Implements Algorithm 20, lines 11-14 """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # NOTE: The ReLU's applied to the inputs are absent from the supplement # pseudocode but present in the source. For maximal compatibility with # the pretrained weights, I'm going with the source. # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. 
""" def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps # These linear layers differ from their specifications in the # supplement. There, they lack bias and use Glorot initialization. # Here as in the official source, they have bias and use the default # Lecun initialization. hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s, init="final") self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, inplace_safe: bool = False, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, attn_drop_rate = 0.0, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ if(_offload_inference and inplace_safe): z = _z_reference_list else: z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # This is kind of clunky, but it's how the original does it # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) ########################## # Compute attention scores ########################## # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): assert(sys.getrefcount(z[0]) == 2) 
z[0] = z[0].cpu() # [*, H, N_res, N_res] if(is_fp16_enabled()): with torch.cuda.amp.autocast(enabled=False): a = torch.matmul( permute_final_dims(q.float(), (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k.float(), (1, 2, 0)), # [*, H, C_hidden, N_res] ) else: a = torch.matmul( permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res] ) a *= math.sqrt(1.0 / (3 * self.c_hidden)) a += (math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))) # [*, N_res, N_res, H, P_q, 3] pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5) if(inplace_safe): pt_att *= pt_att else: pt_att = pt_att ** 2 # [*, N_res, N_res, H, P_q] pt_att = sum(torch.unbind(pt_att, dim=-1)) head_weights = self.softplus(self.head_weights).view( *((1,) * len(pt_att.shape[:-2]) + (-1, 1)) ) head_weights = head_weights * math.sqrt( 1.0 / (3 * (self.no_qk_points * 9.0 / 2)) ) if(inplace_safe): pt_att *= head_weights else: pt_att = pt_att * head_weights # [*, N_res, N_res, H] pt_att = torch.sum(pt_att, dim=-1) * (-0.5) # [*, N_res, N_res] square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2) square_mask = self.inf * (square_mask - 1) """ Frame2seq implementation of IPA regularization via attention dropout """ if attn_drop_rate > 0.0: random_square_mask = torch.rand(square_mask.shape, device=square_mask.device) random_square_mask = self.inf * -1 * (random_square_mask < attn_drop_rate) square_mask += random_square_mask # [*, H, N_res, N_res] pt_att = permute_final_dims(pt_att, (2, 0, 1)) if(inplace_safe): a += pt_att del pt_att a += square_mask.unsqueeze(-3) # in-place softmax attn_core_inplace_cuda.forward_( a, reduce(mul, a.shape[:-1]), a.shape[-1], ) else: a = a + pt_att a = a + square_mask.unsqueeze(-3) a = self.softmax(a) ################ # Compute output ################ # [*, N_res, H, C_hidden] o = torch.matmul( a, v.transpose(-2, -3).to(dtype=a.dtype) ).transpose(-2, -3) # [*, N_res, H * C_hidden]
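One detail worth pulling out of the InvariantPointAttention.forward above is the block commented "Frame2seq implementation of IPA regularization via attention dropout": instead of dropping activations, it adds a large negative value to a random subset of entries of the additive pair mask, so those residue pairs receive near-zero attention weight after the softmax. Below is a small self-contained sketch of that mechanism; the drop rate, sequence length, and logits are illustrative values, not taken from the record.

import torch

inf = 1e5                 # same role as self.inf in the module
attn_drop_rate = 0.1      # hypothetical regularization rate
N_res = 6
mask = torch.ones(N_res)  # all residues valid

# [N_res, N_res] additive mask: 0 where attention is allowed
square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2)
square_mask = inf * (square_mask - 1)

# knock out a random subset of residue pairs, as in the record's forward pass
drop = torch.rand(square_mask.shape) < attn_drop_rate
square_mask = square_mask + (-inf) * drop

logits = torch.randn(N_res, N_res) + square_mask
probs = torch.softmax(logits, dim=-1)

# rows that keep at least one allowed pair give dropped pairs ~zero weight
keep_rows = ~drop.all(dim=-1)
assert torch.all(probs[keep_rows][drop[keep_rows]] < 1e-3)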
o = flatten_final_dims(o, 2)
11
2023-12-25 09:29:36+00:00
24k
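The two tensor helpers this record imports, permute_final_dims and flatten_final_dims, are quoted verbatim in its context field, and the record's next_line ("o = flatten_final_dims(o, 2)") applies the second one to the attention output. The self-contained check below reuses those exact definitions to confirm the shape bookkeeping in the IPA forward pass; the batch sizes are arbitrary illustration values.

from typing import List

import torch

def permute_final_dims(tensor: torch.Tensor, inds: List[int]):
    # reorder the last len(inds) dimensions according to inds
    zero_index = -1 * len(inds)
    first_inds = list(range(len(tensor.shape[:zero_index])))
    return tensor.permute(first_inds + [zero_index + i for i in inds])

def flatten_final_dims(t: torch.Tensor, no_dims: int):
    # merge the last no_dims dimensions into a single one
    return t.reshape(t.shape[:-no_dims] + (-1,))

B, N_res, H, C_hidden = 2, 5, 4, 8
q = torch.randn(B, N_res, H, C_hidden)
k = torch.randn(B, N_res, H, C_hidden)

# attention logits as in the forward pass: [*, H, N_res, N_res]
a = torch.matmul(
    permute_final_dims(q, (1, 0, 2)),  # [B, H, N_res, C_hidden]
    permute_final_dims(k, (1, 2, 0)),  # [B, H, C_hidden, N_res]
)
assert a.shape == (B, H, N_res, N_res)

# the record's next_line: concatenate the per-head outputs along the channel dim
o = torch.randn(B, N_res, H, C_hidden)
assert flatten_final_dims(o, 2).shape == (B, N_res, H * C_hidden)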
iKala/ievals
ievals/cli/ieval.py
[ { "identifier": "TGI_Evaluator", "path": "ievals/modules/qa_evaluators/tgi.py", "snippet": "class TGI_Evaluator(Evaluator):\n def __init__(\n self,\n choices,\n k,\n ip_addr,\n model_name,\n systemMessageToken=\"<|im_start|>system\\n\",\n messageEndToken=\"<|im_end|>\",\n assistantMessageToken=\"<|im_start|>assistant\\n\",\n userMessageToken=\"<|im_start|>user\\n\",\n switch_zh_hans=False,\n ):\n super(TGI_Evaluator, self).__init__(choices, model_name, k)\n self.ip_addr = ip_addr\n self.model_name = model_name\n self.userMessageToken = userMessageToken\n self.assistantMessageToken = assistantMessageToken\n self.messageEndToken = messageEndToken\n self.systemMessageToken = systemMessageToken\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. {line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n text = \"\"\n for prompt in full_prompt:\n if prompt[\"role\"] == \"system\":\n text += (\n self.systemMessageToken\n + prompt[\"content\"]\n + self.messageEndToken\n )\n elif prompt[\"role\"] == \"user\":\n text += (\n self.userMessageToken + prompt[\"content\"] + self.messageEndToken\n )\n elif prompt[\"role\"] == \"assistant\":\n text += (\n self.assistantMessageToken\n + prompt[\"content\"]\n + self.messageEndToken\n )\n text += self.assistantMessageToken\n if self.converter:\n text = self.converter.convert(text)\n\n while response is None and timeout_counter <= 30:\n try:\n response = requests.post(\n f\"http://{self.ip_addr}/generate\",\n data=json.dumps(\n {\n \"inputs\": text,\n \"parameters\": {\n \"max_new_tokens\": 90,\n \"temperature\": 0.001,\n \"stop\": 
[self.messageEndToken],\n },\n }\n ),\n headers={\"Content-Type\": \"application/json\"},\n )\n r = response.json()\n if \"generated_text\" not in r:\n raise ValueError(\"not found: \" + str(r))\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n if response == None:\n response_str = \"\"\n else:\n response_str = response.json()[\"generated_text\"].split(\n self.messageEndToken\n )[0]\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"([A-D]).\",\n r\"答案:([A-D])\",\n r\"([A-D]). 
\",\n r\"^選([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"^選項([A-D])\",\n r\"答案是\\s?选?项?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案應該是:\\s?选?项?\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"正確答案是([A-D])\",\n r\"正確答案是 ([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "Gemini_Evaluator", "path": "ievals/modules/qa_evaluators/gemini.py", "snippet": "class Gemini_Evaluator(Evaluator):\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(Gemini_Evaluator, self).__init__(choices, model_name, k)\n genai.configure(api_key=api_key)\n\n self.model = genai.GenerativeModel(\n model_name,\n safety_settings=[\n {\n \"category\": \"HARM_CATEGORY_HARASSMENT\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n {\n \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n {\n \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n {\n \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n ],\n )\n\n self.model_name = model_name\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI主力,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI主力,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n text = []\n prev_role = \"\"\n for prompt in full_prompt:\n if prompt[\"role\"] == \"system\":\n text.append(prompt[\"content\"] + \"\\n\")\n elif prompt[\"role\"] == \"user\":\n if prev_role == \"system\":\n text[-1] += \"問題: \" + prompt[\"content\"] + \"\\n\"\n else:\n text.append(\"問題: \" + prompt[\"content\"] + \"\\n\")\n elif prompt[\"role\"] == \"assistant\":\n text.append(prompt[\"content\"] + \"\\n\")\n prev_role = prompt[\"role\"]\n if self.converter:\n text = [self.converter.convert(seg) for seg in text]\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.model.generate_content(text)\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n\n if response == None:\n response_str = \"\"\n else:\n try:\n response_str = response.text\n except (ValueError, IndexError):\n response_str = \"\"\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and 
(ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?选?项?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "Claude_Evaluator", "path": "ievals/modules/qa_evaluators/claude.py", "snippet": "class Claude_Evaluator(Evaluator):\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(Claude_Evaluator, self).__init__(choices, model_name, k)\n self.client = anthropic.Anthropic(api_key=api_key)\n self.model_name\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請直接選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請直接選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請直接選出正確的答案。\",\n }\n ]\n\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請直接選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n text = \"\"\n for prompt in full_prompt:\n if prompt[\"role\"] == \"system\":\n text += anthropic.HUMAN_PROMPT + \" \" + prompt[\"content\"]\n elif prompt[\"role\"] == \"user\":\n text += anthropic.HUMAN_PROMPT + \" \" + prompt[\"content\"]\n elif prompt[\"role\"] == \"assistant\":\n text += anthropic.AI_PROMPT + \" \" + prompt[\"content\"]\n text += anthropic.AI_PROMPT\n if self.converter:\n text = self.converter.convert(text)\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.client.completions.create(\n prompt=text,\n stop_sequences=[anthropic.HUMAN_PROMPT],\n model=self.model_name,\n temperature=0.1,\n max_tokens_to_sample=300,\n )\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n if response == None:\n response_str = \"\"\n else:\n response_str = response.completion\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n\n if self.converter:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n 
ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"正確的答案應該是:.*?\\b([A-D])\\b\",\n r\"正確的選項應為:.*?\\b([A-D])\\b\",\n r\"所以答案為([A-D])\",\n r\"答案為\\s?([A-D])\",\n r\"所以下列方程式的解是([A-D])\",\n r\"选([A-D])\",\n r\"选项([A-D])\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是\\s?选?项?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str, re.DOTALL)\n else:\n break\n return ans_list" }, { "identifier": "Azure_Evaluator", "path": "ievals/modules/qa_evaluators/azure.py", "snippet": "class Azure_Evaluator(Evaluator):\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(Azure_Evaluator, self).__init__(choices, model_name, k)\n self.client = AzureOpenAI(\n api_key=api_key,\n api_version=os.getenv(\"AZURE_OPENAI_VERSION\", \"2023-07-01-preview\"),\n azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n )\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n if self.converter:\n tmp[0][\"content\"] = self.converter.convert(tmp[0][\"content\"])\n\n prompt += tmp\n\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n\n if self.converter:\n converted = []\n for p in full_prompt:\n p[\"content\"] = self.converter.convert(p[\"content\"])\n converted.append(p)\n full_prompt = converted\n\n response = None\n timeout_counter = 0\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.client.chat.completions.create(\n model=self.model_name, messages=full_prompt, temperature=0.0\n )\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n\n response_str = \"\"\n if response != None:\n response_str = response.choices[0].message.content\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n if response_str is None:\n response_str = \"\"\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = 
self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"([A-D]). \",\n r\"([A-D]).\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "GPT_Evaluator", "path": "ievals/modules/qa_evaluators/oai_complete.py", "snippet": "class GPT_Evaluator(Evaluator):\n \"\"\"\n Completion endpoint for instruction based model\n davinci, gpt-3.5-instruct\n \"\"\"\n\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(GPT_Evaluator, self).__init__(choices, model_name, k)\n openai.api_key = api_key\n self.client = openai.OpenAI(api_key=api_key)\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n if self.converter:\n tmp[0][\"content\"] = self.converter.convert(tmp[0][\"content\"])\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n if self.converter:\n converted = []\n for p in full_prompt:\n p[\"content\"] = self.converter.convert(p[\"content\"])\n converted.append(p)\n full_prompt = converted\n\n text = \"\"\n for prompt in full_prompt:\n text += prompt[\"content\"] + \"\\n\"\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.client.completions.create(\n model=self.model_name, prompt=text, temperature=0.0\n )\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n if response == None:\n response_str = \"\"\n else:\n response_str = response.choices[0].text\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter: # simplified chinese\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct 
= 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"([A-D]). \",\n r\"([A-D]).\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "ChatGPT_Evaluator", "path": "ievals/modules/qa_evaluators/chatgpt.py", "snippet": "class ChatGPT_Evaluator(Evaluator):\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(ChatGPT_Evaluator, self).__init__(choices, model_name, k)\n openai.api_key = api_key\n self.client = openai.OpenAI(api_key=api_key)\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n if self.converter:\n tmp[0][\"content\"] = self.converter.convert(tmp[0][\"content\"])\n prompt += tmp\n\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n if self.converter: # convert to simplified chinese\n for idx, prompt in enumerate(full_prompt):\n full_prompt[idx][\"content\"] = self.converter.convert(\n prompt[\"content\"]\n )\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.client.chat.completions.create(\n model=self.model_name,\n messages=full_prompt,\n temperature=0.0,\n max_tokens=200,\n )\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n if response == None:\n response_str = \"\"\n else:\n response_str = response.choices[0].message.content\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if 
save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n # manually found regex which can be used to parse most of the response\n # text\n pattern = [\n r\"([A-D]). \",\n r\"([A-D]).\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "DashScope_Evaluator", "path": "ievals/modules/qa_evaluators/ali_dashscope.py", "snippet": "class DashScope_Evaluator(Evaluator):\n \"\"\"\n Completion endpoint for instruction based model\n qwen models\n \"\"\"\n\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(DashScope_Evaluator, self).__init__(choices, model_name, k)\n dashscope.api_key = api_key\n assert model_name in set(Generation.Models.__dict__.values())\n self.model_name = model_name\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n if self.converter:\n tmp[0][\"content\"] = self.converter.convert(tmp[0][\"content\"])\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n if self.converter:\n converted = []\n for p in full_prompt:\n p[\"content\"] = self.converter.convert(p[\"content\"])\n converted.append(p)\n full_prompt = converted\n\n text = \"\"\n for prompt in full_prompt:\n text += prompt[\"content\"] + \"\\n\"\n\n while response is None and timeout_counter <= 30:\n try:\n response = Generation.call(model=self.model_name, prompt=text)\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n\n if response.status_code == HTTPStatus.OK:\n response_str = response.output.text\n else:\n response_str = \"\"\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter: # simplified chinese\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = 
self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"([A-D]). \",\n r\"([A-D]).\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "run_exp", "path": "ievals/exp_executer.py", "snippet": "def run_exp(\n evaluator,\n model_name,\n dataset,\n postfix_name=\"tgi\",\n cache_path=\".cache\",\n split_name=\"test\",\n few_shot=False,\n):\n model_name_path = model_name.replace(\"/\", \"_\")\n save_result_dir = None\n\n if cache_path:\n os.makedirs(f\"{cache_path}\", exist_ok=True)\n os.makedirs(f\"{cache_path}/{model_name_path}\", exist_ok=True)\n save_result_dir = f\"{cache_path}/{model_name_path}\"\n\n task_list, subject2name, subject2category = get_exp_setting(dataset)\n postfix = model_name.split(\"/\")[-1]\n prefix_name = dataset.split(\"/\")[-1]\n result_cache = f\"{prefix_name}_{postfix_name}.tsv\"\n if os.path.exists(result_cache):\n logging.info(f\"Found previous cache {result_cache}, skipping executed subjects\")\n df = pd.read_csv(result_cache, delimiter=\"\\t\", header=None)\n df.columns = [\"model_name\", \"subject\", \"score\"]\n finished_subjects = df[\"subject\"].tolist()\n task_list = [t for t in task_list if t not in finished_subjects]\n\n output_filename = \"\"\n # TODO: absract out the dataset-task logic, as this is likely\n # limited under multi subject task only\n for task in task_list:\n zh_name = subject2name[task]\n test = load_dataset(dataset, task)[split_name]\n test_df = pd.DataFrame([dict(row) for row in test])\n dev = load_dataset(dataset, task)[\"train\"]\n dev_df = pd.DataFrame([dict(row) for row in dev])\n\n accuracy = evaluator.eval_subject(\n zh_name,\n test_df,\n dev_df=dev_df,\n few_shot=few_shot,\n save_result_dir=f\"{cache_path}/{model_name_path}\",\n )\n\n with open(result_cache, \"a\") as fout:\n 
fout.write(\"{}\\t{}\\t{:.5f}\\n\".format(model_name, task, accuracy))\n\n df = pd.read_csv(result_cache, delimiter=\"\\t\", header=None)\n df.columns = [\"model_name\", \"subject\", \"score\"]\n for model_name in df[\"model_name\"].unique():\n print(model_name)" } ]
import os
import logging
import argparse
import pandas as pd
from datasets import load_dataset
from ievals.modules.qa_evaluators.tgi import TGI_Evaluator
from ievals.modules.qa_evaluators.gemini import Gemini_Evaluator
from ievals.modules.qa_evaluators.claude import Claude_Evaluator
from ievals.modules.qa_evaluators.azure import Azure_Evaluator
from ievals.modules.qa_evaluators.oai_complete import GPT_Evaluator
from ievals.modules.qa_evaluators.chatgpt import ChatGPT_Evaluator
from ievals.modules.qa_evaluators.hf_chat import HF_Chat_Evaluator
from ievals.modules.qa_evaluators.hf_base import (
    Qwen_Evaluator,
)  # we only use this for qwen base model
from ievals.modules.qa_evaluators.ali_dashscope import DashScope_Evaluator
from ievals.exp_executer import run_exp
19,593
""" CLI for all models Support mode: if tgi service was used you must pass in IP and hostname if the service was found in model_config.csv you could skip providing the 4 tokens (user, assistant, system, eos) else you need to pass in the four token in args """ try: except ImportError as e: logging.error("huggingface and qwen models are not supported due to " + str(e)) def get_model_config(): current_dir = os.path.dirname(os.path.abspath(__file__)) up_dir = os.path.abspath(os.path.join(current_dir, os.pardir)) df = pd.read_csv(os.path.join(up_dir, "model_config.csv")) df.fillna("", inplace=True) valid_model_names = df["model_name"].tolist() return valid_model_names, df def get_tgi_prompt_config(model_name): valid_model_names, df = get_model_config() if model_name not in valid_model_names: return None, None prompt_config = df[df["model_name"] == model_name].iloc[0] prompt_config.pop("model_name") return prompt_config def get_evaluator(model_name, series=""): if len(series): if series == "azure": return Azure_Evaluator elif series == "openai_chat": return ChatGPT_Evaluator elif series == "openai_complete": return GPT_Evaluator elif series == "gemini": return Gemini_Evaluator elif series == "hf_chat": # implement the chat function return HF_Chat_Evaluator elif series == "tgi": # implement the chat function return TGI_Evaluator l_model_name = model_name.lower() if "gemini" in model_name: return Gemini_Evaluator if "gpt-" in model_name: # its possible to match gpt-3.5-instruct, # but we don't really want to sacrifice more fixed params for that return ChatGPT_Evaluator elif "claude" in model_name:
""" CLI for all models Support mode: if tgi service was used you must pass in IP and hostname if the service was found in model_config.csv you could skip providing the 4 tokens (user, assistant, system, eos) else you need to pass in the four token in args """ try: except ImportError as e: logging.error("huggingface and qwen models are not supported due to " + str(e)) def get_model_config(): current_dir = os.path.dirname(os.path.abspath(__file__)) up_dir = os.path.abspath(os.path.join(current_dir, os.pardir)) df = pd.read_csv(os.path.join(up_dir, "model_config.csv")) df.fillna("", inplace=True) valid_model_names = df["model_name"].tolist() return valid_model_names, df def get_tgi_prompt_config(model_name): valid_model_names, df = get_model_config() if model_name not in valid_model_names: return None, None prompt_config = df[df["model_name"] == model_name].iloc[0] prompt_config.pop("model_name") return prompt_config def get_evaluator(model_name, series=""): if len(series): if series == "azure": return Azure_Evaluator elif series == "openai_chat": return ChatGPT_Evaluator elif series == "openai_complete": return GPT_Evaluator elif series == "gemini": return Gemini_Evaluator elif series == "hf_chat": # implement the chat function return HF_Chat_Evaluator elif series == "tgi": # implement the chat function return TGI_Evaluator l_model_name = model_name.lower() if "gemini" in model_name: return Gemini_Evaluator if "gpt-" in model_name: # its possible to match gpt-3.5-instruct, # but we don't really want to sacrifice more fixed params for that return ChatGPT_Evaluator elif "claude" in model_name:
return Claude_Evaluator
2
2023-12-24 08:00:38+00:00
24k
kraina-ai/quackosm
quackosm/functions.py
[ { "identifier": "GroupedOsmTagsFilter", "path": "quackosm/_osm_tags_filters.py", "snippet": "def merge_osm_tags_filter(osm_tags_filter: OsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: GroupedOsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: Iterable[OsmTagsFilter]) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: Iterable[GroupedOsmTagsFilter]) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(\n osm_tags_filter: Union[\n OsmTagsFilter, GroupedOsmTagsFilter, Iterable[OsmTagsFilter], Iterable[GroupedOsmTagsFilter]\n ]\n) -> OsmTagsFilter:\ndef _merge_grouped_osm_tags_filter(grouped_filter: GroupedOsmTagsFilter) -> OsmTagsFilter:\ndef _merge_multiple_osm_tags_filters(osm_tags_filters: Iterable[OsmTagsFilter]) -> OsmTagsFilter:" }, { "identifier": "OsmWayPolygonConfig", "path": "quackosm/_osm_way_polygon_features.py", "snippet": "class OsmWayPolygonConfig(NamedTuple):\n \"\"\"OSM Way polygon features config object.\"\"\"\n\n all: Iterable[str]\n allowlist: dict[str, Iterable[str]]\n denylist: dict[str, Iterable[str]]" }, { "identifier": "PbfFileReader", "path": "quackosm/pbf_file_reader.py", "snippet": "class PbfFileReader:\n \"\"\"\n PbfFileReader.\n\n PBF(Protocolbuffer Binary Format)[1] file reader is a dedicated `*.osm.pbf` files reader\n class based on DuckDB[2] and its spatial extension[3].\n\n Handler can filter out OSM features based on tags filter and geometry filter\n to limit the result.\n\n References:\n 1. https://wiki.openstreetmap.org/wiki/PBF_Format\n 2. https://duckdb.org/\n 3. https://github.com/duckdb/duckdb_spatial\n \"\"\"\n\n class ConvertedOSMParquetFiles(NamedTuple):\n \"\"\"List of parquet files read from the `*.osm.pbf` file.\"\"\"\n\n nodes_valid_with_tags: \"duckdb.DuckDBPyRelation\"\n nodes_filtered_ids: \"duckdb.DuckDBPyRelation\"\n\n ways_all_with_tags: \"duckdb.DuckDBPyRelation\"\n ways_with_unnested_nodes_refs: \"duckdb.DuckDBPyRelation\"\n ways_required_ids: \"duckdb.DuckDBPyRelation\"\n ways_filtered_ids: \"duckdb.DuckDBPyRelation\"\n\n relations_all_with_tags: \"duckdb.DuckDBPyRelation\"\n relations_with_unnested_way_refs: \"duckdb.DuckDBPyRelation\"\n relations_filtered_ids: \"duckdb.DuckDBPyRelation\"\n\n class ParsedOSMFeatures(NamedTuple):\n \"\"\"Final list of parsed features from the `*.osm.pbf` file.\"\"\"\n\n nodes: \"duckdb.DuckDBPyRelation\"\n ways: \"duckdb.DuckDBPyRelation\"\n relations: \"duckdb.DuckDBPyRelation\"\n\n def __init__(\n self,\n tags_filter: Optional[Union[OsmTagsFilter, GroupedOsmTagsFilter]] = None,\n geometry_filter: Optional[BaseGeometry] = None,\n working_directory: Union[str, Path] = \"files\",\n osm_way_polygon_features_config: Optional[\n Union[OsmWayPolygonConfig, dict[str, Any]]\n ] = None,\n ) -> None:\n \"\"\"\n Initialize PbfFileReader.\n\n Args:\n tags_filter (Union[OsmTagsFilter, GroupedOsmTagsFilter], optional): A dictionary\n specifying which tags to download.\n The keys should be OSM tags (e.g. `building`, `amenity`).\n The values should either be `True` for retrieving all objects with the tag,\n string for retrieving a single tag-value pair\n or list of strings for retrieving all values specified in the list.\n `tags={'leisure': 'park}` would return parks from the area.\n `tags={'leisure': 'park, 'amenity': True, 'shop': ['bakery', 'bicycle']}`\n would return parks, all amenity types, bakeries and bicycle shops.\n If `None`, handler will allow all of the tags to be parsed. 
Defaults to `None`.\n geometry_filter (BaseGeometry, optional): Region which can be used to filter only\n intersecting OSM objects. Defaults to `None`.\n working_directory (Union[str, Path], optional): Directory where to save\n the parsed `*.parquet` files. Defaults to \"files\".\n osm_way_polygon_features_config (Union[OsmWayPolygonConfig, dict[str, Any]], optional):\n Config used to determine which closed way features are polygons.\n Modifications to this config left are left for experienced OSM users.\n Defaults to predefined \"osm_way_polygon_features.json\".\n \"\"\"\n self.tags_filter = tags_filter\n self.merged_tags_filter = merge_osm_tags_filter(tags_filter) if tags_filter else None\n self.geometry_filter = geometry_filter\n self.working_directory = Path(working_directory)\n self.working_directory.mkdir(parents=True, exist_ok=True)\n self.connection: duckdb.DuckDBPyConnection = None\n\n self.rows_per_bucket = 1_000_000\n memory = psutil.virtual_memory()\n # If less than 8 / 16 GB total memory, reduce number of rows per group\n if memory.total < (8 * (1024**3)):\n self.rows_per_bucket = 100_000\n elif memory.total < (16 * (1024**3)):\n self.rows_per_bucket = 500_000\n\n if osm_way_polygon_features_config is None:\n # Config based on two sources + manual OSM wiki check\n # 1. https://github.com/tyrasd/osm-polygon-features/blob/v0.9.2/polygon-features.json\n # 2. https://github.com/ideditor/id-area-keys/blob/v5.0.1/areaKeys.json\n osm_way_polygon_features_config = json.loads(\n (Path(__file__).parent / \"osm_way_polygon_features.json\").read_text()\n )\n\n self.osm_way_polygon_features_config: OsmWayPolygonConfig = (\n osm_way_polygon_features_config\n if isinstance(osm_way_polygon_features_config, OsmWayPolygonConfig)\n else parse_dict_to_config_object(osm_way_polygon_features_config)\n )\n\n def get_features_gdf(\n self,\n file_paths: Union[str, Path, Iterable[Union[str, Path]]],\n explode_tags: Optional[bool] = None,\n ignore_cache: bool = False,\n filter_osm_ids: Optional[list[str]] = None,\n ) -> gpd.GeoDataFrame:\n \"\"\"\n Get features GeoDataFrame from a list of PBF files.\n\n Function parses multiple PBF files and returns a single GeoDataFrame with parsed\n OSM objects.\n\n Args:\n file_paths (Union[str, Path, Iterable[Union[str, Path]]]):\n Path or list of paths of `*.osm.pbf` files to be parsed.\n explode_tags (bool, optional): Whether to split tags into columns based on OSM tag keys.\n If `None`, will be set based on `tags_filter` parameter.\n If no tags filter is provided, then `explode_tags` will set to `False`,\n if there is tags filter it will set to `True`. 
Defaults to `None`.\n ignore_cache: (bool, optional): Whether to ignore precalculated geoparquet files or not.\n Defaults to False.\n filter_osm_ids: (list[str], optional): List of OSM features ids to read from the file.\n Have to be in the form of 'node/<id>', 'way/<id>' or 'relation/<id>'.\n Defaults to an empty list.\n\n Returns:\n gpd.GeoDataFrame: GeoDataFrame with OSM features.\n \"\"\"\n if isinstance(file_paths, (str, Path)):\n file_paths = [file_paths]\n\n if filter_osm_ids is None:\n filter_osm_ids = []\n\n if explode_tags is None:\n explode_tags = self.tags_filter is not None\n\n parsed_geoparquet_files = []\n for file_path in file_paths:\n parsed_geoparquet_file = self.convert_pbf_to_gpq(\n file_path,\n explode_tags=explode_tags,\n ignore_cache=ignore_cache,\n filter_osm_ids=filter_osm_ids,\n )\n parsed_geoparquet_files.append(parsed_geoparquet_file)\n\n parquet_tables = [\n io.read_geoparquet_table(parsed_parquet_file) # type: ignore\n for parsed_parquet_file in parsed_geoparquet_files\n ]\n joined_parquet_table: pa.Table = pa.concat_tables(parquet_tables)\n gdf_parquet = gpd.GeoDataFrame(\n data=joined_parquet_table.drop(GEOMETRY_COLUMN).to_pandas(maps_as_pydicts=\"strict\"),\n geometry=ga.to_geopandas(joined_parquet_table.column(GEOMETRY_COLUMN)),\n ).set_index(FEATURES_INDEX)\n\n return gdf_parquet\n\n def convert_pbf_to_gpq(\n self,\n pbf_path: Union[str, Path],\n result_file_path: Optional[Union[str, Path]] = None,\n explode_tags: Optional[bool] = None,\n ignore_cache: bool = False,\n filter_osm_ids: Optional[list[str]] = None,\n ) -> Path:\n \"\"\"\n Convert PBF file to GeoParquet file.\n\n Args:\n pbf_path (Union[str, Path]): Pbf file to be parsed to GeoParquet.\n result_file_path (Union[str, Path], optional): Where to save\n the geoparquet file. If not provided, will be generated based on hashes\n from provided tags filter and geometry filter. Defaults to `None`.\n explode_tags (bool, optional): Whether to split tags into columns based on OSM tag keys.\n If `None`, will be set based on `tags_filter` parameter.\n If no tags filter is provided, then `explode_tags` will set to `False`,\n if there is tags filter it will set to `True`. 
Defaults to `None`.\n ignore_cache (bool, optional): Whether to ignore precalculated geoparquet files or not.\n Defaults to False.\n filter_osm_ids: (list[str], optional): List of OSM features ids to read from the file.\n Have to be in the form of 'node/<id>', 'way/<id>' or 'relation/<id>'.\n Defaults to an empty list.\n\n Returns:\n Path: Path to the generated GeoParquet file.\n \"\"\"\n if filter_osm_ids is None:\n filter_osm_ids = []\n\n if explode_tags is None:\n explode_tags = self.tags_filter is not None\n\n with tempfile.TemporaryDirectory(dir=self.working_directory.resolve()) as tmp_dir_name:\n try:\n self._set_up_duckdb_connection(tmp_dir_name)\n result_file_path = result_file_path or self._generate_geoparquet_result_file_path(\n pbf_path,\n filter_osm_ids=filter_osm_ids,\n explode_tags=explode_tags,\n )\n parsed_geoparquet_file = self._parse_pbf_file(\n pbf_path=pbf_path,\n tmp_dir_name=tmp_dir_name,\n result_file_path=Path(result_file_path),\n filter_osm_ids=filter_osm_ids,\n explode_tags=explode_tags,\n ignore_cache=ignore_cache,\n )\n return parsed_geoparquet_file\n finally:\n if self.connection is not None:\n self.connection.close()\n self.connection = None\n\n def _set_up_duckdb_connection(self, tmp_dir_name: str) -> None:\n self.connection = duckdb.connect(\n database=str(Path(tmp_dir_name) / \"db.duckdb\"),\n config=dict(preserve_insertion_order=False),\n )\n for extension_name in (\"parquet\", \"spatial\"):\n self.connection.install_extension(extension_name)\n self.connection.load_extension(extension_name)\n\n self.connection.sql(\"\"\"\n CREATE OR REPLACE MACRO linestring_to_linestring_wkt(ls) AS\n 'LINESTRING (' || array_to_string([pt.x || ' ' || pt.y for pt in ls], ', ') || ')';\n \"\"\")\n self.connection.sql(\"\"\"\n CREATE OR REPLACE MACRO linestring_to_polygon_wkt(ls) AS\n 'POLYGON ((' || array_to_string([pt.x || ' ' || pt.y for pt in ls], ', ') || '))';\n \"\"\")\n\n def _parse_pbf_file(\n self,\n pbf_path: Union[str, Path],\n tmp_dir_name: str,\n result_file_path: Path,\n filter_osm_ids: list[str],\n explode_tags: bool = True,\n ignore_cache: bool = False,\n ) -> Path:\n if not result_file_path.exists() or ignore_cache:\n elements = self.connection.sql(f\"SELECT * FROM ST_READOSM('{Path(pbf_path)}');\")\n converted_osm_parquet_files = self._prefilter_elements_ids(\n elements, tmp_dir_name, filter_osm_ids\n )\n\n self._delete_directories(\n tmp_dir_name,\n [\n \"nodes_filtered_non_distinct_ids\",\n \"nodes_prepared_ids\",\n \"ways_valid_ids\",\n \"ways_filtered_non_distinct_ids\",\n \"relations_valid_ids\",\n \"relations_ids\",\n ],\n )\n\n filtered_nodes_with_geometry = self._get_filtered_nodes_with_geometry(\n converted_osm_parquet_files, tmp_dir_name\n )\n self._delete_directories(tmp_dir_name, \"nodes_filtered_ids\")\n\n ways_refs_with_nodes_structs = self._get_ways_refs_with_nodes_structs(\n converted_osm_parquet_files, tmp_dir_name\n )\n self._delete_directories(\n tmp_dir_name,\n [\n \"nodes_valid_with_tags\",\n ],\n )\n\n filtered_ways_with_linestrings = self._get_filtered_ways_with_linestrings(\n osm_parquet_files=converted_osm_parquet_files,\n ways_refs_with_nodes_structs=ways_refs_with_nodes_structs,\n tmp_dir_name=tmp_dir_name,\n )\n required_ways_with_linestrings = self._get_required_ways_with_linestrings(\n osm_parquet_files=converted_osm_parquet_files,\n ways_refs_with_nodes_structs=ways_refs_with_nodes_structs,\n tmp_dir_name=tmp_dir_name,\n )\n self._delete_directories(\n tmp_dir_name,\n [\n \"ways_required_grouped\",\n \"ways_required_ids\",\n 
\"ways_with_unnested_nodes_refs\",\n \"ways_refs_with_nodes_structs\",\n \"required_ways_ids_grouped\",\n \"required_ways_grouped\",\n \"required_ways_tmp\",\n \"filtered_ways_ids_grouped\",\n \"filtered_ways_grouped\",\n \"filtered_ways_tmp\",\n ],\n )\n\n filtered_ways_with_proper_geometry = self._get_filtered_ways_with_proper_geometry(\n converted_osm_parquet_files, filtered_ways_with_linestrings, tmp_dir_name\n )\n self._delete_directories(\n tmp_dir_name,\n [\n \"ways_prepared_ids\",\n \"ways_filtered_ids\",\n \"ways_all_with_tags\",\n \"filtered_ways_with_linestrings\",\n ],\n )\n\n filtered_relations_with_geometry = self._get_filtered_relations_with_geometry(\n converted_osm_parquet_files, required_ways_with_linestrings, tmp_dir_name\n )\n self._delete_directories(\n tmp_dir_name,\n [\n \"relations_all_with_tags\",\n \"relations_with_unnested_way_refs\",\n \"relations_filtered_ids\",\n \"required_ways_with_linestrings\",\n \"valid_relation_parts\",\n \"relation_inner_parts\",\n \"relation_outer_parts\",\n \"relation_outer_parts_with_holes\",\n \"relation_outer_parts_without_holes\",\n ],\n )\n\n self._concatenate_results_to_geoparquet(\n PbfFileReader.ParsedOSMFeatures(\n nodes=filtered_nodes_with_geometry,\n ways=filtered_ways_with_proper_geometry,\n relations=filtered_relations_with_geometry,\n ),\n tmp_dir_name=tmp_dir_name,\n save_file_path=result_file_path,\n explode_tags=explode_tags,\n )\n\n return result_file_path\n\n def _generate_geoparquet_result_file_path(\n self,\n pbf_file_path: Union[str, Path],\n explode_tags: bool,\n filter_osm_ids: list[str],\n ) -> Path:\n pbf_file_name = Path(pbf_file_path).name.removesuffix(\".osm.pbf\")\n\n osm_filter_tags_hash_part = \"nofilter\"\n if self.tags_filter is not None:\n h = hashlib.new(\"sha256\")\n h.update(json.dumps(self.tags_filter).encode())\n osm_filter_tags_hash_part = h.hexdigest()\n\n clipping_geometry_hash_part = \"noclip\"\n if self.geometry_filter is not None:\n h = hashlib.new(\"sha256\")\n h.update(wktlib.dumps(self.geometry_filter).encode())\n clipping_geometry_hash_part = h.hexdigest()\n\n exploded_tags_part = \"exploded\" if explode_tags else \"compact\"\n\n filter_osm_ids_hash_part = \"\"\n if filter_osm_ids:\n h = hashlib.new(\"sha256\")\n h.update(json.dumps(sorted(set(filter_osm_ids))).encode())\n filter_osm_ids_hash_part = f\"_{h.hexdigest()}\"\n\n result_file_name = (\n f\"{pbf_file_name}_{osm_filter_tags_hash_part}\"\n f\"_{clipping_geometry_hash_part}_{exploded_tags_part}{filter_osm_ids_hash_part}.geoparquet\"\n )\n return Path(self.working_directory) / result_file_name\n\n def _prefilter_elements_ids(\n self, elements: \"duckdb.DuckDBPyRelation\", tmp_dir_name: str, filter_osm_ids: list[str]\n ) -> ConvertedOSMParquetFiles:\n sql_filter = self._generate_osm_tags_sql_filter()\n filtered_tags_clause = self._generate_filtered_tags_clause()\n\n is_intersecting = self.geometry_filter is not None\n\n with TaskProgressSpinner(\"Reading nodes\", \"1\"):\n # NODES - VALID (NV)\n # - select all with kind = 'node'\n # - select all with lat and lon not empty\n nodes_valid_with_tags = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT\n id,\n {filtered_tags_clause},\n lon,\n lat\n FROM ({elements.sql_query()})\n WHERE kind = 'node'\n AND lat IS NOT NULL AND lon IS NOT NULL\n \"\"\",\n file_path=Path(tmp_dir_name) / \"nodes_valid_with_tags\",\n )\n # NODES - INTERSECTING (NI)\n # - select all from NV which intersect given geometry filter\n # NODES - FILTERED (NF)\n # - select all from NI with tags filter\n 
filter_osm_node_ids_filter = self._generate_elements_filter(filter_osm_ids, \"node\")\n if is_intersecting:\n wkt = cast(BaseGeometry, self.geometry_filter).wkt\n intersection_filter = f\"ST_Intersects(ST_Point(lon, lat), ST_GeomFromText('{wkt}'))\"\n with TaskProgressSpinner(\"Filtering nodes - intersection\", \"2\"):\n nodes_intersecting_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT DISTINCT id FROM ({nodes_valid_with_tags.sql_query()}) n\n WHERE {intersection_filter} = true\n \"\"\",\n file_path=Path(tmp_dir_name) / \"nodes_intersecting_ids\",\n )\n with TaskProgressSpinner(\"Filtering nodes - tags\", \"3\"):\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT id FROM ({nodes_valid_with_tags.sql_query()}) n\n SEMI JOIN ({nodes_intersecting_ids.sql_query()}) ni ON n.id = ni.id\n WHERE tags IS NOT NULL AND cardinality(tags) > 0 AND ({sql_filter})\n AND ({filter_osm_node_ids_filter})\n \"\"\",\n file_path=Path(tmp_dir_name) / \"nodes_filtered_non_distinct_ids\",\n )\n else:\n with TaskProgressSpinner(\"Filtering nodes - intersection\", \"2\"):\n pass\n with TaskProgressSpinner(\"Filtering nodes - tags\", \"3\"):\n nodes_intersecting_ids = nodes_valid_with_tags\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT id FROM ({nodes_valid_with_tags.sql_query()}) n\n WHERE tags IS NOT NULL AND cardinality(tags) > 0 AND ({sql_filter})\n AND ({filter_osm_node_ids_filter})\n \"\"\",\n file_path=Path(tmp_dir_name) / \"nodes_filtered_non_distinct_ids\",\n )\n with TaskProgressSpinner(\"Calculating distinct filtered nodes ids\", \"4\"):\n nodes_filtered_ids = self._calculate_unique_ids_to_parquet(\n Path(tmp_dir_name) / \"nodes_filtered_non_distinct_ids\",\n Path(tmp_dir_name) / \"nodes_filtered_ids\",\n )\n\n with TaskProgressSpinner(\"Reading ways\", \"5\"):\n # WAYS - VALID (WV)\n # - select all with kind = 'way'\n # - select all with more then one ref\n # - join all NV to refs\n # - select all where all refs has been joined (total_refs == found_refs)\n self.connection.sql(f\"\"\"\n SELECT *\n FROM ({elements.sql_query()}) w\n WHERE kind = 'way' AND len(refs) >= 2\n \"\"\").to_view(\"ways\", replace=True)\n ways_all_with_tags = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n WITH filtered_tags AS (\n SELECT id, {filtered_tags_clause}, tags as raw_tags\n FROM ways w\n WHERE tags IS NOT NULL AND cardinality(tags) > 0\n )\n SELECT id, tags, raw_tags\n FROM filtered_tags\n WHERE tags IS NOT NULL AND cardinality(tags) > 0\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_all_with_tags\",\n )\n with TaskProgressSpinner(\"Unnesting ways\", \"6\"):\n ways_with_unnested_nodes_refs = self._sql_to_parquet_file(\n sql_query=\"\"\"\n SELECT w.id, UNNEST(refs) as ref, UNNEST(range(length(refs))) as ref_idx\n FROM ways w\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_with_unnested_nodes_refs\",\n )\n with TaskProgressSpinner(\"Filtering ways - valid refs\", \"7\"):\n ways_valid_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n WITH total_ways_with_nodes_refs AS (\n SELECT id, ref\n FROM ({ways_with_unnested_nodes_refs.sql_query()})\n ),\n unmatched_ways_with_nodes_refs AS (\n SELECT id, ref\n FROM ({ways_with_unnested_nodes_refs.sql_query()}) w\n ANTI JOIN ({nodes_valid_with_tags.sql_query()}) nv ON nv.id = w.ref\n )\n SELECT DISTINCT id\n FROM total_ways_with_nodes_refs\n EXCEPT\n SELECT DISTINCT id\n FROM unmatched_ways_with_nodes_refs\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_valid_ids\",\n )\n\n with TaskProgressSpinner(\"Filtering ways - intersection\", \"8\"):\n # 
WAYS - INTERSECTING (WI)\n # - select all from WV with joining any from NV on ref\n if is_intersecting:\n ways_intersecting_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT DISTINCT uwr.id\n FROM ({ways_with_unnested_nodes_refs.sql_query()}) uwr\n SEMI JOIN ({ways_valid_ids.sql_query()}) wv ON uwr.id = wv.id\n SEMI JOIN ({nodes_intersecting_ids.sql_query()}) n ON n.id = uwr.ref\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_intersecting_ids\",\n )\n else:\n ways_intersecting_ids = ways_valid_ids\n with TaskProgressSpinner(\"Filtering ways - tags\", \"9\"):\n # WAYS - FILTERED (WF)\n # - select all from WI with tags filter\n filter_osm_way_ids_filter = self._generate_elements_filter(filter_osm_ids, \"way\")\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT id FROM ({ways_all_with_tags.sql_query()}) w\n SEMI JOIN ({ways_intersecting_ids.sql_query()}) wi ON w.id = wi.id\n WHERE ({sql_filter}) AND ({filter_osm_way_ids_filter})\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_filtered_non_distinct_ids\",\n )\n\n with TaskProgressSpinner(\"Calculating distinct filtered ways ids\", \"10\"):\n ways_filtered_ids = self._calculate_unique_ids_to_parquet(\n Path(tmp_dir_name) / \"ways_filtered_non_distinct_ids\",\n Path(tmp_dir_name) / \"ways_filtered_ids\",\n )\n\n with TaskProgressSpinner(\"Reading relations\", \"11\"):\n # RELATIONS - VALID (RV)\n # - select all with kind = 'relation'\n # - select all with more then one ref\n # - select all with type in ['boundary', 'multipolygon']\n # - join all WV to refs\n # - select all where all refs has been joined (total_refs == found_refs)\n self.connection.sql(f\"\"\"\n SELECT *\n FROM ({elements.sql_query()})\n WHERE kind = 'relation' AND len(refs) > 0\n AND list_contains(map_keys(tags), 'type')\n AND list_has_any(map_extract(tags, 'type'), ['boundary', 'multipolygon'])\n \"\"\").to_view(\"relations\", replace=True)\n relations_all_with_tags = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n WITH filtered_tags AS (\n SELECT id, {filtered_tags_clause}\n FROM relations r\n WHERE tags IS NOT NULL AND cardinality(tags) > 0\n )\n SELECT id, tags\n FROM filtered_tags\n WHERE tags IS NOT NULL AND cardinality(tags) > 0\n \"\"\",\n file_path=Path(tmp_dir_name) / \"relations_all_with_tags\",\n )\n\n with TaskProgressSpinner(\"Unnesting relations\", \"12\"):\n relations_with_unnested_way_refs = self._sql_to_parquet_file(\n sql_query=\"\"\"\n WITH unnested_relation_refs AS (\n SELECT\n r.id,\n UNNEST(refs) as ref,\n UNNEST(ref_types) as ref_type,\n UNNEST(ref_roles) as ref_role,\n UNNEST(range(length(refs))) as ref_idx\n FROM relations r\n )\n SELECT id, ref, ref_role, ref_idx\n FROM unnested_relation_refs\n WHERE ref_type = 'way'\n \"\"\",\n file_path=Path(tmp_dir_name) / \"relations_with_unnested_way_refs\",\n )\n\n with TaskProgressSpinner(\"Filtering relations - valid refs\", \"13\"):\n relations_valid_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n WITH total_relation_refs AS (\n SELECT id, ref\n FROM ({relations_with_unnested_way_refs.sql_query()}) frr\n ),\n unmatched_relation_refs AS (\n SELECT id, ref\n FROM ({relations_with_unnested_way_refs.sql_query()}) r\n ANTI JOIN ({ways_valid_ids.sql_query()}) wv ON wv.id = r.ref\n )\n SELECT DISTINCT id\n FROM total_relation_refs\n EXCEPT\n SELECT DISTINCT id\n FROM unmatched_relation_refs\n \"\"\",\n file_path=Path(tmp_dir_name) / \"relations_valid_ids\",\n )\n\n with TaskProgressSpinner(\"Filtering relations - intersection\", \"14\"):\n # RELATIONS - INTERSECTING (RI)\n # - select 
all from RW with joining any from RV on ref\n if is_intersecting:\n relations_intersecting_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT frr.id\n FROM ({relations_with_unnested_way_refs.sql_query()}) frr\n SEMI JOIN ({relations_valid_ids.sql_query()}) rv ON frr.id = rv.id\n SEMI JOIN ({ways_intersecting_ids.sql_query()}) wi ON wi.id = frr.ref\n \"\"\",\n file_path=Path(tmp_dir_name) / \"relations_intersecting_ids\",\n )\n else:\n relations_intersecting_ids = relations_valid_ids\n\n with TaskProgressSpinner(\"Filtering relations - tags\", \"15\"):\n # RELATIONS - FILTERED (RF)\n # - select all from RI with tags filter\n filter_osm_relation_ids_filter = self._generate_elements_filter(\n filter_osm_ids, \"relation\"\n )\n\n relations_ids_path = Path(tmp_dir_name) / \"relations_ids\"\n relations_ids_path.mkdir(parents=True, exist_ok=True)\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT id FROM ({relations_all_with_tags.sql_query()}) r\n SEMI JOIN ({relations_intersecting_ids.sql_query()}) ri ON r.id = ri.id\n WHERE ({sql_filter}) AND ({filter_osm_relation_ids_filter})\n \"\"\",\n file_path=relations_ids_path / \"filtered\",\n )\n\n with TaskProgressSpinner(\"Calculating distinct filtered relations ids\", \"16\"):\n relations_filtered_ids = self._calculate_unique_ids_to_parquet(\n relations_ids_path / \"filtered\", Path(tmp_dir_name) / \"relations_filtered_ids\"\n )\n\n ways_prepared_ids_path = Path(tmp_dir_name) / \"ways_prepared_ids\"\n ways_prepared_ids_path.mkdir(parents=True, exist_ok=True)\n\n with TaskProgressSpinner(\"Loading required ways - by relations\", \"17\"):\n # WAYS - REQUIRED (WR)\n # - required - all IDs from WF\n # + all needed to construct relations from RF\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT ref as id\n FROM ({relations_with_unnested_way_refs.sql_query()}) frr\n SEMI JOIN ({relations_filtered_ids.sql_query()}) fri ON fri.id = frr.id\n \"\"\",\n file_path=ways_prepared_ids_path / \"required_by_relations\",\n )\n\n with TaskProgressSpinner(\"Calculating distinct required ways ids\", \"18\"):\n ways_required_ids = self._calculate_unique_ids_to_parquet(\n ways_prepared_ids_path, Path(tmp_dir_name) / \"ways_required_ids\"\n )\n\n return PbfFileReader.ConvertedOSMParquetFiles(\n nodes_valid_with_tags=nodes_valid_with_tags,\n nodes_filtered_ids=nodes_filtered_ids,\n ways_all_with_tags=ways_all_with_tags,\n ways_with_unnested_nodes_refs=ways_with_unnested_nodes_refs,\n ways_required_ids=ways_required_ids,\n ways_filtered_ids=ways_filtered_ids,\n relations_all_with_tags=relations_all_with_tags,\n relations_with_unnested_way_refs=relations_with_unnested_way_refs,\n relations_filtered_ids=relations_filtered_ids,\n )\n\n def _delete_directories(\n self, tmp_dir_name: Union[Path, str], directories: Union[str, list[str]]\n ) -> None:\n if isinstance(directories, str):\n directories = [directories]\n for directory in directories:\n directory_path = Path(tmp_dir_name) / directory\n if not directory_path.exists():\n continue\n shutil.rmtree(directory_path)\n\n def _generate_osm_tags_sql_filter(self) -> str:\n \"\"\"Prepare features filter clauses based on tags filter.\"\"\"\n filter_clauses = [\"(1=1)\"]\n\n if self.merged_tags_filter:\n filter_clauses.clear()\n\n for filter_tag_key, filter_tag_value in self.merged_tags_filter.items():\n if isinstance(filter_tag_value, bool) and filter_tag_value:\n filter_clauses.append(f\"(list_contains(map_keys(tags), '{filter_tag_key}'))\")\n elif isinstance(filter_tag_value, str):\n escaped_value = 
self._sql_escape(filter_tag_value)\n filter_clauses.append(\n f\"list_extract(map_extract(tags, '{filter_tag_key}'), 1) =\"\n f\" '{escaped_value}'\"\n )\n elif isinstance(filter_tag_value, list) and filter_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in filter_tag_value]\n filter_clauses.append(\n f\"list_extract(map_extract(tags, '{filter_tag_key}'), 1) IN\"\n f\" ({', '.join(values_list)})\"\n )\n\n return \" OR \".join(filter_clauses)\n\n def _generate_filtered_tags_clause(self) -> str:\n \"\"\"Prepare filtered tags clause by removing tags commonly ignored by OGR.\"\"\"\n tags_to_ignore = [\n \"area\",\n \"created_by\",\n \"converted_by\",\n \"source\",\n \"time\",\n \"ele\",\n \"note\",\n \"todo\",\n \"fixme\",\n \"FIXME\",\n \"openGeoDB:\",\n ]\n escaped_tags_to_ignore = [f\"'{tag}'\" for tag in tags_to_ignore]\n\n return f\"\"\"\n map_from_entries(\n [\n tag_entry\n for tag_entry in map_entries(tags)\n if not tag_entry.key in ({','.join(escaped_tags_to_ignore)})\n and not starts_with(tag_entry.key, 'openGeoDB:')\n ]\n ) as tags\n \"\"\"\n\n def _generate_elements_filter(\n self, filter_osm_ids: list[str], element_type: Literal[\"node\", \"way\", \"relation\"]\n ) -> str:\n filter_osm_relation_ids = [\n osm_id.replace(f\"{element_type}/\", \"\")\n for osm_id in filter_osm_ids\n if osm_id.startswith(f\"{element_type}/\")\n ]\n if not filter_osm_ids:\n filter_osm_ids_filter = \"1=1\"\n elif filter_osm_relation_ids:\n filter_osm_ids_filter = f\"id in ({','.join(filter_osm_relation_ids)})\"\n else:\n filter_osm_ids_filter = \"id IS NULL\"\n\n return filter_osm_ids_filter\n\n def _sql_escape(self, value: str) -> str:\n \"\"\"Escape value for SQL query.\"\"\"\n return value.replace(\"'\", \"''\")\n\n def _sql_to_parquet_file(self, sql_query: str, file_path: Path) -> \"duckdb.DuckDBPyRelation\":\n relation = self.connection.sql(sql_query)\n return self._save_parquet_file(relation, file_path)\n\n def _save_parquet_file(\n self, relation: \"duckdb.DuckDBPyRelation\", file_path: Path\n ) -> \"duckdb.DuckDBPyRelation\":\n self.connection.sql(f\"\"\"\n COPY (\n SELECT * FROM ({relation.sql_query()})\n ) TO '{file_path}' (FORMAT 'parquet', PER_THREAD_OUTPUT true, ROW_GROUP_SIZE 25000)\n \"\"\")\n return self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{file_path}/**')\n \"\"\")\n\n def _calculate_unique_ids_to_parquet(\n self, file_path: Path, result_path: Optional[Path] = None\n ) -> \"duckdb.DuckDBPyRelation\":\n if result_path is None:\n result_path = file_path / \"distinct\"\n\n self.connection.sql(f\"\"\"\n COPY (\n SELECT id FROM read_parquet('{file_path}/**') GROUP BY id\n ) TO '{result_path}' (FORMAT 'parquet', PER_THREAD_OUTPUT true, ROW_GROUP_SIZE 25000)\n \"\"\")\n\n return self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{result_path}/**')\n \"\"\")\n\n def _get_filtered_nodes_with_geometry(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n nodes_with_geometry = self.connection.sql(f\"\"\"\n SELECT\n n.id,\n n.tags,\n ST_Point(round(n.lon, 7), round(n.lat, 7)) geometry\n FROM ({osm_parquet_files.nodes_valid_with_tags.sql_query()}) n\n SEMI JOIN ({osm_parquet_files.nodes_filtered_ids.sql_query()}) fn ON n.id = fn.id\n \"\"\")\n nodes_parquet = self._save_parquet_file_with_geometry(\n relation=nodes_with_geometry,\n file_path=Path(tmp_dir_name) / \"filtered_nodes_with_geometry\",\n step_name=\"Saving filtered nodes with geometries\",\n step_number=\"19\",\n )\n return 
nodes_parquet\n\n def _get_ways_refs_with_nodes_structs(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n ways_refs_with_nodes_structs = self.connection.sql(f\"\"\"\n SELECT\n w.id,\n w.ref,\n w.ref_idx,\n struct_pack(x := round(n.lon, 7), y := round(n.lat, 7))::POINT_2D point\n FROM ({osm_parquet_files.nodes_valid_with_tags.sql_query()}) n\n JOIN ({osm_parquet_files.ways_with_unnested_nodes_refs.sql_query()}) w ON w.ref = n.id\n \"\"\")\n with TaskProgressSpinner(\"Saving required nodes with structs\", \"20\"):\n ways_refs_parquet = self._save_parquet_file(\n relation=ways_refs_with_nodes_structs,\n file_path=Path(tmp_dir_name) / \"ways_refs_with_nodes_structs\",\n )\n return ways_refs_parquet\n\n def _get_filtered_ways_with_linestrings(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n ways_refs_with_nodes_structs: \"duckdb.DuckDBPyRelation\",\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n grouped_ways_path = Path(tmp_dir_name) / \"filtered_ways_grouped\"\n grouped_ways_tmp_path = Path(tmp_dir_name) / \"filtered_ways_tmp\"\n destination_dir_path = Path(tmp_dir_name) / \"filtered_ways_with_linestrings\"\n\n with TaskProgressSpinner(\"Grouping filtered ways\", \"21\"):\n groups = self._group_ways(\n ways_ids=osm_parquet_files.ways_filtered_ids,\n destination_dir_path=destination_dir_path,\n grouped_ways_tmp_path=grouped_ways_tmp_path,\n grouped_ways_path=grouped_ways_path,\n ways_refs_with_nodes_structs=ways_refs_with_nodes_structs,\n )\n\n with TaskProgressBar(\"Saving filtered ways with linestrings\", \"22\") as bar:\n self._construct_ways_linestrings(\n bar=bar,\n groups=groups,\n destination_dir_path=destination_dir_path,\n grouped_ways_path=grouped_ways_path,\n )\n\n ways_parquet = self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{destination_dir_path}/**')\n \"\"\")\n return ways_parquet\n\n def _get_required_ways_with_linestrings(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n ways_refs_with_nodes_structs: \"duckdb.DuckDBPyRelation\",\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n grouped_ways_path = Path(tmp_dir_name) / \"required_ways_grouped\"\n grouped_ways_tmp_path = Path(tmp_dir_name) / \"required_ways_tmp\"\n destination_dir_path = Path(tmp_dir_name) / \"required_ways_with_linestrings\"\n\n with TaskProgressSpinner(\"Grouping required ways\", \"23\"):\n groups = self._group_ways(\n ways_ids=osm_parquet_files.ways_required_ids,\n destination_dir_path=destination_dir_path,\n grouped_ways_tmp_path=grouped_ways_tmp_path,\n grouped_ways_path=grouped_ways_path,\n ways_refs_with_nodes_structs=ways_refs_with_nodes_structs,\n )\n\n with TaskProgressBar(\"Saving required ways with linestrings\", \"24\") as bar:\n self._construct_ways_linestrings(\n bar=bar,\n groups=groups,\n destination_dir_path=destination_dir_path,\n grouped_ways_path=grouped_ways_path,\n )\n\n ways_parquet = self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{destination_dir_path}/**')\n \"\"\")\n return ways_parquet\n\n def _group_ways(\n self,\n ways_ids: \"duckdb.DuckDBPyRelation\",\n ways_refs_with_nodes_structs: \"duckdb.DuckDBPyRelation\",\n destination_dir_path: Path,\n grouped_ways_tmp_path: Path,\n grouped_ways_path: Path,\n ) -> int:\n total_required_ways = ways_ids.count(\"id\").fetchone()[0]\n\n destination_dir_path.mkdir(parents=True, exist_ok=True)\n grouped_ways_tmp_path.mkdir(parents=True, exist_ok=True)\n\n if total_required_ways == 0:\n empty_file_path = 
str(destination_dir_path / \"empty.parquet\")\n self.connection.sql(\"CREATE OR REPLACE TABLE x(id STRING, linestring LINESTRING_2D);\")\n self.connection.table(\"x\").to_parquet(empty_file_path)\n return -1\n\n groups = int(floor(total_required_ways / self.rows_per_bucket))\n\n ways_ids_grouped_relation = self.connection.sql(f\"\"\"\n SELECT id,\n floor(\n row_number() OVER () / {self.rows_per_bucket}\n )::INTEGER as \"group\",\n FROM ({ways_ids.sql_query()})\n \"\"\")\n grouped_ways_ids_with_group_path = grouped_ways_tmp_path / \"ids_with_group\"\n ways_ids_grouped_relation_parquet = self._save_parquet_file(\n relation=ways_ids_grouped_relation, file_path=grouped_ways_ids_with_group_path\n )\n\n ways_with_nodes_points_relation = self.connection.sql(f\"\"\"\n SELECT\n w.id, w.point, w.ref_idx, rw.\"group\"\n FROM ({ways_ids_grouped_relation_parquet.sql_query()}) rw\n JOIN ({ways_refs_with_nodes_structs.sql_query()}) w\n ON rw.id = w.id\n \"\"\")\n\n grouped_ways_ids_with_points_path = grouped_ways_tmp_path / \"ids_with_points\"\n ways_with_nodes_points_relation_parquet = self._save_parquet_file(\n relation=ways_with_nodes_points_relation, file_path=grouped_ways_ids_with_points_path\n )\n\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n id, point, ref_idx, \"group\"\n FROM ({ways_with_nodes_points_relation_parquet.sql_query()}) w\n ) TO '{grouped_ways_path}'\n (FORMAT 'parquet', PARTITION_BY (\"group\"), ROW_GROUP_SIZE 25000)\n \"\"\")\n\n return groups\n\n def _construct_ways_linestrings(\n self,\n bar: TaskProgressBar,\n groups: int,\n destination_dir_path: Path,\n grouped_ways_path: Path,\n ) -> None:\n grouped_ways_path.mkdir(parents=True, exist_ok=True)\n\n for group in bar.track(range(groups + 1)):\n current_ways_group_path = grouped_ways_path / f\"group={group}\"\n current_ways_group_relation = self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{current_ways_group_path}/**')\n \"\"\")\n\n ways_with_linestrings = self.connection.sql(f\"\"\"\n SELECT id, list(point ORDER BY ref_idx ASC)::LINESTRING_2D linestring\n FROM ({current_ways_group_relation.sql_query()})\n GROUP BY id\n \"\"\")\n self._save_parquet_file(\n relation=ways_with_linestrings,\n file_path=destination_dir_path / f\"group={group}\",\n )\n\n def _get_filtered_ways_with_proper_geometry(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n required_ways_with_linestrings: \"duckdb.DuckDBPyRelation\",\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n osm_way_polygon_features_filter_clauses = [\n \"list_contains(map_keys(raw_tags), 'area') AND \"\n \"list_extract(map_extract(raw_tags, 'area'), 1) = 'yes'\"\n ]\n\n for osm_tag_key in self.osm_way_polygon_features_config.all:\n osm_way_polygon_features_filter_clauses.append(\n f\"list_contains(map_keys(raw_tags), '{osm_tag_key}')\"\n )\n\n for osm_tag_key, osm_tag_values in self.osm_way_polygon_features_config.allowlist.items():\n escaped_values = \",\".join(\n [f\"'{self._sql_escape(osm_tag_value)}'\" for osm_tag_value in osm_tag_values]\n )\n osm_way_polygon_features_filter_clauses.append(\n f\"list_contains(map_keys(raw_tags), '{osm_tag_key}') AND\"\n f\" list_has_any(map_extract(raw_tags, '{osm_tag_key}'), [{escaped_values}])\"\n )\n\n for osm_tag_key, osm_tag_values in self.osm_way_polygon_features_config.denylist.items():\n escaped_values = \",\".join(\n [f\"'{self._sql_escape(osm_tag_value)}'\" for osm_tag_value in osm_tag_values]\n )\n osm_way_polygon_features_filter_clauses.append(\n f\"list_contains(map_keys(raw_tags), '{osm_tag_key}') AND 
NOT\"\n f\" list_has_any(map_extract(raw_tags, '{osm_tag_key}'), [{escaped_values}])\"\n )\n\n ways_with_proper_geometry = self.connection.sql(f\"\"\"\n WITH required_ways_with_linestrings AS (\n SELECT\n w.id,\n w.tags,\n w_l.linestring,\n -- Filter below is based on `_is_closed_way_a_polygon` function from OSMnx\n -- Filter values are built dynamically from a config.\n (\n -- if first and last nodes are the same\n ST_Equals(linestring[1]::POINT_2D, linestring[-1]::POINT_2D)\n -- if the element doesn't have any tags leave it as a Linestring\n AND raw_tags IS NOT NULL\n -- if the element is specifically tagged 'area':'no' -> LineString\n AND NOT (\n list_contains(map_keys(raw_tags), 'area')\n AND list_extract(map_extract(raw_tags, 'area'), 1) = 'no'\n )\n AND ({' OR '.join(osm_way_polygon_features_filter_clauses)})\n ) AS is_polygon\n FROM ({required_ways_with_linestrings.sql_query()}) w_l\n SEMI JOIN ({osm_parquet_files.ways_filtered_ids.sql_query()}) fw ON w_l.id = fw.id\n JOIN ({osm_parquet_files.ways_all_with_tags.sql_query()}) w ON w.id = w_l.id\n ),\n proper_geometries AS (\n SELECT\n id,\n tags,\n (CASE\n WHEN is_polygon\n THEN linestring_to_polygon_wkt(linestring)\n ELSE linestring_to_linestring_wkt(linestring)\n END)::GEOMETRY AS geometry\n FROM\n required_ways_with_linestrings w\n )\n SELECT id, tags, geometry FROM proper_geometries\n \"\"\")\n ways_parquet = self._save_parquet_file_with_geometry(\n relation=ways_with_proper_geometry,\n file_path=Path(tmp_dir_name) / \"filtered_ways_with_geometry\",\n step_name=\"Saving filtered ways with geometries\",\n step_number=\"25\",\n )\n return ways_parquet\n\n def _get_filtered_relations_with_geometry(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n required_ways_with_linestrings: \"duckdb.DuckDBPyRelation\",\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n valid_relation_parts = self.connection.sql(f\"\"\"\n WITH unnested_relations AS (\n SELECT\n r.id,\n COALESCE(r.ref_role, 'outer') as ref_role,\n r.ref,\n linestring_to_linestring_wkt(w.linestring)::GEOMETRY as geometry\n FROM ({osm_parquet_files.relations_with_unnested_way_refs.sql_query()}) r\n SEMI JOIN ({osm_parquet_files.relations_filtered_ids.sql_query()}) fr\n ON r.id = fr.id\n JOIN ({required_ways_with_linestrings.sql_query()}) w\n ON w.id = r.ref\n ORDER BY r.id, r.ref_idx\n ),\n any_outer_refs AS (\n SELECT id, bool_or(ref_role == 'outer') any_outer_refs\n FROM unnested_relations\n GROUP BY id\n ),\n relations_with_geometries AS (\n SELECT\n x.id,\n CASE WHEN aor.any_outer_refs\n THEN x.ref_role ELSE 'outer'\n END as ref_role,\n x.geom geometry,\n row_number() OVER (PARTITION BY x.id) as geometry_id\n FROM (\n SELECT\n id,\n ref_role,\n UNNEST(\n ST_Dump(ST_LineMerge(ST_Collect(list(geometry)))), recursive := true\n ),\n FROM unnested_relations\n GROUP BY id, ref_role\n ) x\n JOIN any_outer_refs aor ON aor.id = x.id\n WHERE ST_NPoints(geom) >= 4\n ),\n valid_relations AS (\n SELECT id, is_valid\n FROM (\n SELECT\n id,\n bool_and(\n ST_Equals(ST_StartPoint(geometry), ST_EndPoint(geometry))\n ) is_valid\n FROM relations_with_geometries\n GROUP BY id\n )\n WHERE is_valid = true\n )\n SELECT * FROM relations_with_geometries\n SEMI JOIN valid_relations ON relations_with_geometries.id = valid_relations.id\n \"\"\")\n valid_relation_parts_parquet = self._save_parquet_file_with_geometry(\n relation=valid_relation_parts,\n file_path=Path(tmp_dir_name) / \"valid_relation_parts\",\n step_name=\"Saving valid relations parts\",\n step_number=\"26\",\n )\n 
relation_inner_parts = self.connection.sql(f\"\"\"\n SELECT id, geometry_id, ST_MakePolygon(geometry) geometry\n FROM ({valid_relation_parts_parquet.sql_query()})\n WHERE ref_role = 'inner'\n \"\"\")\n relation_inner_parts_parquet = self._save_parquet_file_with_geometry(\n relation=relation_inner_parts,\n file_path=Path(tmp_dir_name) / \"relation_inner_parts\",\n fix_geometries=True,\n step_name=\"Saving relations inner parts\",\n step_number=\"27\",\n )\n relation_outer_parts = self.connection.sql(f\"\"\"\n SELECT id, geometry_id, ST_MakePolygon(geometry) geometry\n FROM ({valid_relation_parts_parquet.sql_query()})\n WHERE ref_role = 'outer'\n \"\"\")\n relation_outer_parts_parquet = self._save_parquet_file_with_geometry(\n relation=relation_outer_parts,\n file_path=Path(tmp_dir_name) / \"relation_outer_parts\",\n fix_geometries=True,\n step_name=\"Saving relations outer parts\",\n step_number=\"28\",\n )\n relation_outer_parts_with_holes = self.connection.sql(f\"\"\"\n SELECT\n og.id,\n og.geometry_id,\n ST_Difference(any_value(og.geometry), ST_Union_Agg(ig.geometry)) geometry\n FROM ({relation_outer_parts_parquet.sql_query()}) og\n JOIN ({relation_inner_parts_parquet.sql_query()}) ig\n ON og.id = ig.id AND ST_WITHIN(ig.geometry, og.geometry)\n GROUP BY og.id, og.geometry_id\n \"\"\")\n relation_outer_parts_with_holes_parquet = self._save_parquet_file_with_geometry(\n relation=relation_outer_parts_with_holes,\n file_path=Path(tmp_dir_name) / \"relation_outer_parts_with_holes\",\n step_name=\"Saving relations outer parts with holes\",\n step_number=\"29\",\n )\n relation_outer_parts_without_holes = self.connection.sql(f\"\"\"\n SELECT\n og.id,\n og.geometry_id,\n og.geometry\n FROM ({relation_outer_parts_parquet.sql_query()}) og\n ANTI JOIN ({relation_outer_parts_with_holes_parquet.sql_query()}) ogwh\n ON og.id = ogwh.id AND og.geometry_id = ogwh.geometry_id\n \"\"\")\n relation_outer_parts_without_holes_parquet = self._save_parquet_file_with_geometry(\n relation=relation_outer_parts_without_holes,\n file_path=Path(tmp_dir_name) / \"relation_outer_parts_without_holes\",\n step_name=\"Saving relations outer parts without holes\",\n step_number=\"30\",\n )\n relations_with_geometry = self.connection.sql(f\"\"\"\n WITH unioned_outer_geometries AS (\n SELECT id, geometry\n FROM ({relation_outer_parts_with_holes_parquet.sql_query()})\n UNION ALL\n SELECT id, geometry\n FROM ({relation_outer_parts_without_holes_parquet.sql_query()})\n ),\n final_geometries AS (\n SELECT id, ST_Union_Agg(geometry) geometry\n FROM unioned_outer_geometries\n GROUP BY id\n )\n SELECT r_g.id, r.tags, r_g.geometry\n FROM final_geometries r_g\n JOIN ({osm_parquet_files.relations_all_with_tags.sql_query()}) r\n ON r.id = r_g.id\n \"\"\")\n relations_parquet = self._save_parquet_file_with_geometry(\n relation=relations_with_geometry,\n file_path=Path(tmp_dir_name) / \"filtered_relations_with_geometry\",\n step_name=\"Saving filtered relations with geometries\",\n step_number=\"31\",\n )\n return relations_parquet\n\n def _save_parquet_file_with_geometry(\n self,\n relation: \"duckdb.DuckDBPyRelation\",\n file_path: Path,\n step_name: str,\n step_number: str,\n fix_geometries: bool = False,\n ) -> \"duckdb.DuckDBPyRelation\":\n if not fix_geometries:\n with TaskProgressSpinner(step_name, step_number):\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n * EXCLUDE (geometry), ST_AsWKB(geometry) geometry_wkb\n FROM ({relation.sql_query()})\n ) TO '{file_path}' (\n FORMAT 'parquet',\n PER_THREAD_OUTPUT true,\n ROW_GROUP_SIZE 
25000\n )\n \"\"\")\n else:\n valid_path = file_path / \"valid\"\n invalid_path = file_path / \"invalid\"\n fixed_path = file_path / \"fixed\"\n\n valid_path.mkdir(parents=True, exist_ok=True)\n invalid_path.mkdir(parents=True, exist_ok=True)\n fixed_path.mkdir(parents=True, exist_ok=True)\n\n # Save valid features\n with TaskProgressSpinner(f\"{step_name} - valid geometries\", f\"{step_number}.1\"):\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n * EXCLUDE (geometry), ST_AsWKB(geometry) geometry_wkb\n FROM ({relation.sql_query()})\n WHERE ST_IsValid(geometry)\n ) TO '{valid_path}' (\n FORMAT 'parquet',\n PER_THREAD_OUTPUT true,\n ROW_GROUP_SIZE 25000\n )\n \"\"\")\n\n # Save invalid features\n with TaskProgressSpinner(f\"{step_name} - invalid geometries\", f\"{step_number}.2\"):\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n * EXCLUDE (geometry), ST_AsWKB(geometry) geometry_wkb,\n floor(\n row_number() OVER () / {self.rows_per_bucket}\n )::INTEGER as \"group\",\n FROM ({relation.sql_query()})\n WHERE NOT ST_IsValid(geometry)\n ) TO '{invalid_path}' (\n FORMAT 'parquet', PARTITION_BY (\"group\"), ROW_GROUP_SIZE 25000\n )\n \"\"\")\n\n # Fix invalid features\n total_groups = 0\n while (invalid_path / f\"group={total_groups}\").exists():\n total_groups += 1\n\n if total_groups > 0:\n with TaskProgressBar(\n f\"{step_name} - fixing invalid geometries\", f\"{step_number}.3\"\n ) as bar:\n for group_id in bar.track(range(total_groups)):\n current_invalid_features_group_path = invalid_path / f\"group={group_id}\"\n current_invalid_features_group_table = pq.read_table(\n current_invalid_features_group_path\n ).drop(\"group\")\n valid_geometry_column = ga.as_wkb(\n ga.to_geopandas(\n ga.with_crs(\n current_invalid_features_group_table.column(\"geometry_wkb\"),\n WGS84_CRS,\n )\n ).make_valid(),\n )\n current_invalid_features_group_table = (\n current_invalid_features_group_table.drop(\"geometry_wkb\")\n )\n\n current_invalid_features_group_table = (\n current_invalid_features_group_table.append_column(\n \"geometry_wkb\", valid_geometry_column\n )\n )\n pq.write_table(\n current_invalid_features_group_table,\n fixed_path / f\"data_{group_id}.parquet\",\n )\n\n self._delete_directories(invalid_path.parent, [\"invalid\"])\n\n return self.connection.sql(f\"\"\"\n SELECT * EXCLUDE (geometry_wkb), ST_GeomFromWKB(geometry_wkb) geometry\n FROM read_parquet('{file_path}/**')\n \"\"\")\n\n def _concatenate_results_to_geoparquet(\n self,\n parsed_data: ParsedOSMFeatures,\n tmp_dir_name: str,\n save_file_path: Path,\n explode_tags: bool,\n ) -> None:\n select_clauses = [\n *self._generate_osm_tags_sql_select(parsed_data, explode_tags),\n \"geometry\",\n ]\n\n node_select_clauses = [\"'node/' || id as feature_id\", *select_clauses]\n way_select_clauses = [\"'way/' || id as feature_id\", *select_clauses]\n relation_select_clauses = [\"'relation/' || id as feature_id\", *select_clauses]\n\n unioned_features = self.connection.sql(f\"\"\"\n SELECT {', '.join(node_select_clauses)}\n FROM ({parsed_data.nodes.sql_query()}) n\n UNION ALL\n SELECT {', '.join(way_select_clauses)}\n FROM ({parsed_data.ways.sql_query()}) w\n UNION ALL\n SELECT {', '.join(relation_select_clauses)}\n FROM ({parsed_data.relations.sql_query()}) r\n \"\"\")\n\n grouped_features = self._parse_features_relation_to_groups(unioned_features, explode_tags)\n\n valid_features_full_relation = self.connection.sql(f\"\"\"\n SELECT * FROM ({grouped_features.sql_query()})\n WHERE ST_IsValid(geometry)\n \"\"\")\n\n valid_features_parquet_path = 
Path(tmp_dir_name) / \"osm_valid_elements\"\n valid_features_parquet_relation = self._save_parquet_file_with_geometry(\n valid_features_full_relation,\n valid_features_parquet_path,\n step_name=\"Saving valid features\",\n step_number=\"32.1\",\n )\n\n valid_features_parquet_table = pq.read_table(valid_features_parquet_path)\n\n is_empty = valid_features_parquet_table.num_rows == 0\n\n if not is_empty:\n geometry_column = ga.as_wkb(\n ga.with_crs(valid_features_parquet_table.column(\"geometry_wkb\"), WGS84_CRS)\n )\n else:\n geometry_column = ga.as_wkb(gpd.GeoSeries([], crs=WGS84_CRS))\n\n valid_features_parquet_table = valid_features_parquet_table.append_column(\n GEOMETRY_COLUMN, geometry_column\n ).drop(\"geometry_wkb\")\n\n parquet_tables = [valid_features_parquet_table]\n\n invalid_features_full_relation = self.connection.sql(f\"\"\"\n SELECT * FROM ({grouped_features.sql_query()}) a\n ANTI JOIN ({valid_features_parquet_relation.sql_query()}) b\n ON a.feature_id = b.feature_id\n \"\"\")\n\n total_nodes = parsed_data.nodes.count(\"id\").fetchone()[0]\n total_ways = parsed_data.ways.count(\"id\").fetchone()[0]\n total_relations = parsed_data.relations.count(\"id\").fetchone()[0]\n total_features = total_nodes + total_ways + total_relations\n\n valid_features = valid_features_parquet_relation.count(\"feature_id\").fetchone()[0]\n\n invalid_features = total_features - valid_features\n\n if invalid_features > 0:\n with TaskProgressSpinner(\"Grouping invalid features\", \"32.2\"):\n groups = floor(invalid_features / self.rows_per_bucket)\n grouped_invalid_features_result_parquet = (\n Path(tmp_dir_name) / \"osm_invalid_elements_grouped\"\n )\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n * EXCLUDE (geometry), ST_AsWKB(geometry) geometry_wkb,\n floor(\n row_number() OVER () / {self.rows_per_bucket}\n )::INTEGER as \"group\",\n FROM ({invalid_features_full_relation.sql_query()})\n ) TO '{grouped_invalid_features_result_parquet}'\n (FORMAT 'parquet', PARTITION_BY (\"group\"), ROW_GROUP_SIZE 25000)\n \"\"\")\n\n with TaskProgressBar(\"Fixing invalid features\", \"32.3\") as bar:\n for group in bar.track(range(groups + 1)):\n current_invalid_features_group_path = (\n grouped_invalid_features_result_parquet / f\"group={group}\"\n )\n current_invalid_features_group_table = pq.read_table(\n current_invalid_features_group_path\n ).drop(\"group\")\n valid_geometry_column = ga.as_wkb(\n ga.to_geopandas(\n ga.with_crs(\n current_invalid_features_group_table.column(\"geometry_wkb\"),\n WGS84_CRS,\n )\n ).make_valid()\n )\n\n current_invalid_features_group_table = (\n current_invalid_features_group_table.append_column(\n GEOMETRY_COLUMN, valid_geometry_column\n )\n )\n current_invalid_features_group_table = (\n current_invalid_features_group_table.drop(\"geometry_wkb\")\n )\n parquet_tables.append(current_invalid_features_group_table)\n\n joined_parquet_table: pa.Table = pa.concat_tables(parquet_tables)\n\n is_empty = joined_parquet_table.num_rows == 0\n\n empty_columns = []\n for column_name in joined_parquet_table.column_names:\n if column_name in (FEATURES_INDEX, GEOMETRY_COLUMN):\n continue\n if (\n is_empty\n or pa.compute.all(\n pa.compute.is_null(joined_parquet_table.column(column_name))\n ).as_py()\n ):\n empty_columns.append(column_name)\n\n if empty_columns:\n joined_parquet_table = joined_parquet_table.drop(empty_columns)\n\n with TaskProgressSpinner(\"Saving final geoparquet file\", \"33\"):\n io.write_geoparquet_table( # type: ignore\n joined_parquet_table, save_file_path, 
primary_geometry_column=GEOMETRY_COLUMN\n )\n\n def _generate_osm_tags_sql_select(\n self, parsed_data: ParsedOSMFeatures, explode_tags: bool\n ) -> list[str]:\n \"\"\"Prepare features filter clauses based on tags filter.\"\"\"\n osm_tag_keys_select_clauses = []\n\n # TODO: elif keep other tags\n if not self.merged_tags_filter and not explode_tags:\n osm_tag_keys_select_clauses = [\"tags\"]\n elif not self.merged_tags_filter and explode_tags:\n osm_tag_keys = set()\n for elements in (\n parsed_data.nodes,\n parsed_data.ways,\n parsed_data.relations,\n ):\n found_tag_keys = [row[0] for row in self.connection.sql(f\"\"\"\n SELECT DISTINCT UNNEST(map_keys(tags)) tag_key\n FROM ({elements.sql_query()})\n \"\"\").fetchall()]\n osm_tag_keys.update(found_tag_keys)\n osm_tag_keys_select_clauses = [\n f\"list_extract(map_extract(tags, '{osm_tag_key}'), 1) as \\\"{osm_tag_key}\\\"\"\n for osm_tag_key in sorted(list(osm_tag_keys))\n ]\n elif self.merged_tags_filter and not explode_tags:\n filter_tag_clauses = []\n for filter_tag_key, filter_tag_value in self.merged_tags_filter.items():\n if isinstance(filter_tag_value, bool) and filter_tag_value:\n filter_tag_clauses.append(f\"tag_entry.key = '{filter_tag_key}'\")\n elif isinstance(filter_tag_value, str):\n escaped_value = self._sql_escape(filter_tag_value)\n filter_tag_clauses.append(\n f\"(tag_entry.key = '{filter_tag_key}' AND tag_entry.value =\"\n f\" '{escaped_value}')\"\n )\n elif isinstance(filter_tag_value, list) and filter_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in filter_tag_value]\n filter_tag_clauses.append(\n f\"(tag_entry.key = '{filter_tag_key}' AND tag_entry.value IN\"\n f\" ({', '.join(values_list)}))\"\n )\n osm_tag_keys_select_clauses = [f\"\"\"\n map_from_entries(\n [\n tag_entry\n for tag_entry in map_entries(tags)\n if {\" OR \".join(filter_tag_clauses)}\n ]\n ) as tags\n \"\"\"]\n elif self.merged_tags_filter and explode_tags:\n for filter_tag_key, filter_tag_value in self.merged_tags_filter.items():\n if isinstance(filter_tag_value, bool) and filter_tag_value:\n osm_tag_keys_select_clauses.append(\n f\"list_extract(map_extract(tags, '{filter_tag_key}'), 1) as\"\n f' \"{filter_tag_key}\"'\n )\n elif isinstance(filter_tag_value, str):\n escaped_value = self._sql_escape(filter_tag_value)\n osm_tag_keys_select_clauses.append(f\"\"\"\n CASE WHEN list_extract(\n map_extract(tags, '{filter_tag_key}'), 1\n ) = '{escaped_value}'\n THEN '{escaped_value}'\n ELSE NULL\n END as \"{filter_tag_key}\"\n \"\"\")\n elif isinstance(filter_tag_value, list) and filter_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in filter_tag_value]\n osm_tag_keys_select_clauses.append(f\"\"\"\n CASE WHEN list_extract(\n map_extract(tags, '{filter_tag_key}'), 1\n ) IN ({', '.join(values_list)})\n THEN list_extract(map_extract(tags, '{filter_tag_key}'), 1)\n ELSE NULL\n END as \"{filter_tag_key}\"\n \"\"\")\n\n if len(osm_tag_keys_select_clauses) > 100:\n warnings.warn(\n \"Select clause contains more than 100 columns\"\n f\" (found {len(osm_tag_keys_select_clauses)} columns).\"\n \" Query might fail with insufficient memory resources.\"\n \" Consider applying more restrictive OsmTagsFilter for parsing.\",\n stacklevel=1,\n )\n\n return osm_tag_keys_select_clauses\n\n def _parse_features_relation_to_groups(\n self,\n features_relation: \"duckdb.DuckDBPyRelation\",\n explode_tags: bool,\n ) -> \"duckdb.DuckDBPyRelation\":\n \"\"\"\n Optionally group raw OSM features into groups defined in 
`GroupedOsmTagsFilter`.\n\n Creates new features based on definition from `GroupedOsmTagsFilter`.\n Returns transformed DuckDB relation with columns based on group names from the filter.\n Values are built by concatenation of matching tag key and value with\n an equal sign (eg. amenity=parking). Since many tags can match a definition\n of a single group, a first match is used as a feature value.\n\n Args:\n features_relation (duckdb.DuckDBPyRelation): Generated features from the loader.\n explode_tags (bool): Whether to split tags into columns based on OSM tag keys.\n\n Returns:\n duckdb.DuckDBPyRelation: Parsed features_relation.\n \"\"\"\n if not self.tags_filter or not is_expected_type(self.tags_filter, GroupedOsmTagsFilter):\n return features_relation\n\n grouped_features_relation: \"duckdb.DuckDBPyRelation\"\n grouped_tags_filter = cast(GroupedOsmTagsFilter, self.tags_filter)\n\n if explode_tags:\n case_clauses = []\n for group_name in sorted(grouped_tags_filter.keys()):\n osm_filter = grouped_tags_filter[group_name]\n case_when_clauses = []\n for osm_tag_key, osm_tag_value in osm_filter.items():\n if isinstance(osm_tag_value, bool) and osm_tag_value:\n case_when_clauses.append(\n f\"WHEN \\\"{osm_tag_key}\\\" IS NOT NULL THEN '{osm_tag_key}=' ||\"\n f' \"{osm_tag_key}\"'\n )\n elif isinstance(osm_tag_value, str):\n escaped_value = self._sql_escape(osm_tag_value)\n case_when_clauses.append(\n f\"WHEN \\\"{osm_tag_key}\\\" = '{escaped_value}' THEN '{osm_tag_key}=' ||\"\n f' \"{osm_tag_key}\"'\n )\n elif isinstance(osm_tag_value, list) and osm_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in osm_tag_value]\n case_when_clauses.append(\n f\"WHEN \\\"{osm_tag_key}\\\" IN ({', '.join(values_list)}) THEN\"\n f\" '{osm_tag_key}=' || \\\"{osm_tag_key}\\\"\"\n )\n case_clause = f'CASE {\" \".join(case_when_clauses)} END AS \"{group_name}\"'\n case_clauses.append(case_clause)\n\n joined_case_clauses = \", \".join(case_clauses)\n grouped_features_relation = self.connection.sql(f\"\"\"\n SELECT feature_id, {joined_case_clauses}, geometry\n FROM ({features_relation.sql_query()})\n \"\"\")\n else:\n case_clauses = []\n group_names = sorted(grouped_tags_filter.keys())\n for group_name in group_names:\n osm_filter = grouped_tags_filter[group_name]\n case_when_clauses = []\n for osm_tag_key, osm_tag_value in osm_filter.items():\n element_clause = f\"element_at(tags, '{osm_tag_key}')[1]\"\n if isinstance(osm_tag_value, bool) and osm_tag_value:\n case_when_clauses.append(\n f\"WHEN {element_clause} IS NOT NULL THEN '{osm_tag_key}=' ||\"\n f\" {element_clause}\"\n )\n elif isinstance(osm_tag_value, str):\n escaped_value = self._sql_escape(osm_tag_value)\n case_when_clauses.append(\n f\"WHEN {element_clause} = '{escaped_value}' THEN '{osm_tag_key}=' ||\"\n f\" {element_clause}\"\n )\n elif isinstance(osm_tag_value, list) and osm_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in osm_tag_value]\n case_when_clauses.append(\n f\"WHEN {element_clause} IN ({', '.join(values_list)}) THEN\"\n f\" '{osm_tag_key}=' || {element_clause}\"\n )\n case_clause = f'CASE {\" \".join(case_when_clauses)} END'\n case_clauses.append(case_clause)\n\n group_names_as_sql_strings = [f\"'{group_name}'\" for group_name in group_names]\n groups_map = (\n f\"map([{', '.join(group_names_as_sql_strings)}], [{', '.join(case_clauses)}])\"\n )\n non_null_groups_map = f\"\"\"map_from_entries(\n [\n tag_entry\n for tag_entry in map_entries({groups_map})\n if tag_entry.value IS NOT NULL\n ]\n ) 
as tags\"\"\"\n\n grouped_features_relation = self.connection.sql(f\"\"\"\n SELECT feature_id, {non_null_groups_map}, geometry\n FROM ({features_relation.sql_query()})\n \"\"\")\n\n return grouped_features_relation" } ]
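The reader class in the context snippet above is easiest to understand from the calling side. The following sketch is an editorial illustration only, not part of the record: the constructor keywords (tags_filter, geometry_filter, working_directory) are assumptions inferred from the attributes the snippet references, and "monaco.osm.pbf" is a hypothetical input file.

# Minimal usage sketch of the PbfFileReader shown in the snippet above.
# Constructor keyword names and the input file name are assumptions.
from quackosm.pbf_file_reader import PbfFileReader

reader = PbfFileReader(
    tags_filter={"building": True, "amenity": ["parking", "school"]},  # bool / list values, as handled by the SQL filter builder
    geometry_filter=None,        # no clipping geometry -> "noclip" hash part in the result file name
    working_directory="files",   # where cached .geoparquet results are written
)

# Convert a PBF extract to a cached GeoParquet file (its name is derived from
# hashes of the tag and geometry filters, as in _generate_geoparquet_result_file_path)...
gpq_path = reader.convert_pbf_to_gpq("monaco.osm.pbf")

# ...or read the filtered features straight into a GeoDataFrame indexed by
# feature ids of the form 'node/<id>', 'way/<id>' or 'relation/<id>'.
features_gdf = reader.get_features_gdf("monaco.osm.pbf", ignore_cache=False)
print(gpq_path, len(features_gdf))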
from collections.abc import Iterable from pathlib import Path from typing import Any, Optional, Union from shapely.geometry.base import BaseGeometry from quackosm._osm_tags_filters import GroupedOsmTagsFilter, OsmTagsFilter from quackosm._osm_way_polygon_features import OsmWayPolygonConfig from quackosm.pbf_file_reader import PbfFileReader import geopandas as gpd
17,284
""" Functions. This module contains helper functions to simplify the usage. """ def convert_pbf_to_gpq( pbf_path: Union[str, Path],
""" Functions. This module contains helper functions to simplify the usage. """ def convert_pbf_to_gpq( pbf_path: Union[str, Path],
tags_filter: Optional[Union[OsmTagsFilter, GroupedOsmTagsFilter]] = None,
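The held-out next line above continues the cropped convert_pbf_to_gpq signature with a tags_filter parameter. Below is a hedged sketch of how that module-level helper might be called; the import path and anything beyond pbf_path and tags_filter are assumptions, since the rest of the signature is cropped in this record.

# Hypothetical call of the module-level convert_pbf_to_gpq helper whose
# definition is cropped above. The import path is an assumption.
from quackosm import convert_pbf_to_gpq

result_path = convert_pbf_to_gpq(
    "monaco.osm.pbf",                                   # hypothetical PBF extract
    tags_filter={"highway": ["primary", "secondary"]},  # parameter revealed by the next line
)
print(result_path)  # expected to point at the generated .geoparquet file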
0
2023-12-28 11:26:41+00:00
24k
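This closes one record of the dump: context snippets, import statement, token count, cropped and full code, the held-out next line, the gold snippet index, a timestamp and a length bucket. As a hedged illustration of how such a record could be consumed for repository-level next-line completion, the sketch below assumes the record is available as a Python dict; the key names mirror the column layout of this dump and are assumptions, not an official loader API.

# Sketch of turning one record into a (prompt, target) pair for next-line
# completion. Dict keys are assumed from the dump's column layout.
def build_example(record: dict) -> tuple[str, str]:
    # The gold snippet index (0 in the record above) selects the cross-file
    # snippet that actually contains the needed definition.
    gold = record["context"][record["gold_snippet_index"]]
    retrieved = f'# Path: {gold["path"]}\n{gold["snippet"]}'

    # Prompt = retrieved cross-file context + in-file imports + cropped code;
    # target = the single held-out next line.
    prompt = "\n".join([retrieved, record["import_statement"], record["cropped_code"]])
    return prompt, record["next_line"]

# Example (with a record loaded elsewhere):
# prompt, target = build_example(record)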
KyanChen/TTP
mmdet/configs/rtmdet/rtmdet_tiny_8xb32_300e_coco.py
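The next record targets an RTMDet-Tiny training config, and the context snippets that follow list the data transforms it depends on (PackDetInputs, LoadAnnotations, CachedMixUp). As a rough, hypothetical illustration only, not the contents of the actual config file, such transforms are typically wired into an mmdet-style pipeline as below, with parameter values taken from the defaults shown in the snippets.

# Hypothetical mmdet-style pipeline fragment using the transforms listed in
# the context below; values mirror the snippet defaults, not the real config.
train_pipeline = [
    dict(type='LoadImageFromFile'),          # standard mmcv transform, not shown in the snippets
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='CachedMixUp',
         img_scale=(640, 640),               # output size after mixup
         ratio_range=(0.5, 1.5),             # scale ratio of the cached mixup image
         pad_val=114.0,
         max_cached_images=20),
    dict(type='PackDetInputs'),              # pack image + annotations into DetDataSample
]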
[ { "identifier": "PackDetInputs", "path": "mmdet/datasets/transforms/formatting.py", "snippet": "class PackDetInputs(BaseTransform):\n \"\"\"Pack the inputs data for the detection / semantic segmentation /\n panoptic segmentation.\n\n The ``img_meta`` item is always populated. The contents of the\n ``img_meta`` dictionary depends on ``meta_keys``. By default this includes:\n\n - ``img_id``: id of the image\n\n - ``img_path``: path to the image file\n\n - ``ori_shape``: original shape of the image as a tuple (h, w)\n\n - ``img_shape``: shape of the image input to the network as a tuple \\\n (h, w). Note that images may be zero padded on the \\\n bottom/right if the batch tensor is larger than this shape.\n\n - ``scale_factor``: a float indicating the preprocessing scale\n\n - ``flip``: a boolean indicating if image flip transform was used\n\n - ``flip_direction``: the flipping direction\n\n Args:\n meta_keys (Sequence[str], optional): Meta keys to be converted to\n ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n Default: ``('img_id', 'img_path', 'ori_shape', 'img_shape',\n 'scale_factor', 'flip', 'flip_direction')``\n \"\"\"\n mapping_table = {\n 'gt_bboxes': 'bboxes',\n 'gt_bboxes_labels': 'labels',\n 'gt_masks': 'masks'\n }\n\n def __init__(self,\n meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n 'scale_factor', 'flip', 'flip_direction')):\n self.meta_keys = meta_keys\n\n def transform(self, results: dict) -> dict:\n \"\"\"Method to pack the input data.\n\n Args:\n results (dict): Result dict from the data pipeline.\n\n Returns:\n dict:\n\n - 'inputs' (obj:`torch.Tensor`): The forward data of models.\n - 'data_sample' (obj:`DetDataSample`): The annotation info of the\n sample.\n \"\"\"\n packed_results = dict()\n if 'img' in results:\n img = results['img']\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n # To improve the computational speed by by 3-5 times, apply:\n # If image is not contiguous, use\n # `numpy.transpose()` followed by `numpy.ascontiguousarray()`\n # If image is already contiguous, use\n # `torch.permute()` followed by `torch.contiguous()`\n # Refer to https://github.com/open-mmlab/mmdetection/pull/9533\n # for more details\n if not img.flags.c_contiguous:\n img = np.ascontiguousarray(img.transpose(2, 0, 1))\n img = to_tensor(img)\n else:\n img = to_tensor(img).permute(2, 0, 1).contiguous()\n\n packed_results['inputs'] = img\n\n if 'gt_ignore_flags' in results:\n valid_idx = np.where(results['gt_ignore_flags'] == 0)[0]\n ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0]\n\n data_sample = DetDataSample()\n instance_data = InstanceData()\n ignore_instance_data = InstanceData()\n\n for key in self.mapping_table.keys():\n if key not in results:\n continue\n if key == 'gt_masks' or isinstance(results[key], BaseBoxes):\n if 'gt_ignore_flags' in results:\n instance_data[\n self.mapping_table[key]] = results[key][valid_idx]\n ignore_instance_data[\n self.mapping_table[key]] = results[key][ignore_idx]\n else:\n instance_data[self.mapping_table[key]] = results[key]\n else:\n if 'gt_ignore_flags' in results:\n instance_data[self.mapping_table[key]] = to_tensor(\n results[key][valid_idx])\n ignore_instance_data[self.mapping_table[key]] = to_tensor(\n results[key][ignore_idx])\n else:\n instance_data[self.mapping_table[key]] = to_tensor(\n results[key])\n data_sample.gt_instances = instance_data\n data_sample.ignored_instances = ignore_instance_data\n\n if 'proposals' in results:\n proposals = InstanceData(\n 
bboxes=to_tensor(results['proposals']),\n scores=to_tensor(results['proposals_scores']))\n data_sample.proposals = proposals\n\n if 'gt_seg_map' in results:\n gt_sem_seg_data = dict(\n sem_seg=to_tensor(results['gt_seg_map'][None, ...].copy()))\n gt_sem_seg_data = PixelData(**gt_sem_seg_data)\n if 'ignore_index' in results:\n metainfo = dict(ignore_index=results['ignore_index'])\n gt_sem_seg_data.set_metainfo(metainfo)\n data_sample.gt_sem_seg = gt_sem_seg_data\n\n img_meta = {}\n for key in self.meta_keys:\n if key in results:\n img_meta[key] = results[key]\n data_sample.set_metainfo(img_meta)\n packed_results['data_samples'] = data_sample\n\n return packed_results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(meta_keys={self.meta_keys})'\n return repr_str" }, { "identifier": "LoadAnnotations", "path": "mmdet/datasets/transforms/loading.py", "snippet": "class LoadAnnotations(MMCV_LoadAnnotations):\n \"\"\"Load and process the ``instances`` and ``seg_map`` annotation provided\n by dataset.\n\n The annotation format is as the following:\n\n .. code-block:: python\n\n {\n 'instances':\n [\n {\n # List of 4 numbers representing the bounding box of the\n # instance, in (x1, y1, x2, y2) order.\n 'bbox': [x1, y1, x2, y2],\n\n # Label of image classification.\n 'bbox_label': 1,\n\n # Used in instance/panoptic segmentation. The segmentation mask\n # of the instance or the information of segments.\n # 1. If list[list[float]], it represents a list of polygons,\n # one for each connected component of the object. Each\n # list[float] is one simple polygon in the format of\n # [x1, y1, ..., xn, yn] (n >= 3). The Xs and Ys are absolute\n # coordinates in unit of pixels.\n # 2. If dict, it represents the per-pixel segmentation mask in\n # COCO's compressed RLE format. The dict should have keys\n # “size” and “counts”. Can be loaded by pycocotools\n 'mask': list[list[float]] or dict,\n\n }\n ]\n # Filename of semantic or panoptic segmentation ground truth file.\n 'seg_map_path': 'a/b/c'\n }\n\n After this module, the annotation has been changed to the format below:\n\n .. code-block:: python\n\n {\n # In (x1, y1, x2, y2) order, float type. N is the number of bboxes\n # in an image\n 'gt_bboxes': BaseBoxes(N, 4)\n # In int type.\n 'gt_bboxes_labels': np.ndarray(N, )\n # In built-in class\n 'gt_masks': PolygonMasks (H, W) or BitmapMasks (H, W)\n # In uint8 type.\n 'gt_seg_map': np.ndarray (H, W)\n # in (x, y, v) order, float type.\n }\n\n Required Keys:\n\n - height\n - width\n - instances\n\n - bbox (optional)\n - bbox_label\n - mask (optional)\n - ignore_flag\n\n - seg_map_path (optional)\n\n Added Keys:\n\n - gt_bboxes (BaseBoxes[torch.float32])\n - gt_bboxes_labels (np.int64)\n - gt_masks (BitmapMasks | PolygonMasks)\n - gt_seg_map (np.uint8)\n - gt_ignore_flags (bool)\n\n Args:\n with_bbox (bool): Whether to parse and load the bbox annotation.\n Defaults to True.\n with_label (bool): Whether to parse and load the label annotation.\n Defaults to True.\n with_mask (bool): Whether to parse and load the mask annotation.\n Default: False.\n with_seg (bool): Whether to parse and load the semantic segmentation\n annotation. Defaults to False.\n poly2mask (bool): Whether to convert mask to bitmap. Default: True.\n box_type (str): The box type used to wrap the bboxes. If ``box_type``\n is None, gt_bboxes will keep being np.ndarray. Defaults to 'hbox'.\n reduce_zero_label (bool): Whether reduce all label value\n by 1. 
Usually used for datasets where 0 is background label.\n Defaults to False.\n ignore_index (int): The label index to be ignored.\n Valid only if reduce_zero_label is true. Defaults is 255.\n imdecode_backend (str): The image decoding backend type. The backend\n argument for :func:``mmcv.imfrombytes``.\n See :fun:``mmcv.imfrombytes`` for details.\n Defaults to 'cv2'.\n backend_args (dict, optional): Arguments to instantiate the\n corresponding backend. Defaults to None.\n \"\"\"\n\n def __init__(\n self,\n with_mask: bool = False,\n poly2mask: bool = True,\n box_type: str = 'hbox',\n # use for semseg\n reduce_zero_label: bool = False,\n ignore_index: int = 255,\n **kwargs) -> None:\n super(LoadAnnotations, self).__init__(**kwargs)\n self.with_mask = with_mask\n self.poly2mask = poly2mask\n self.box_type = box_type\n self.reduce_zero_label = reduce_zero_label\n self.ignore_index = ignore_index\n\n def _load_bboxes(self, results: dict) -> None:\n \"\"\"Private function to load bounding box annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n Returns:\n dict: The dict contains loaded bounding box annotations.\n \"\"\"\n gt_bboxes = []\n gt_ignore_flags = []\n for instance in results.get('instances', []):\n gt_bboxes.append(instance['bbox'])\n gt_ignore_flags.append(instance['ignore_flag'])\n if self.box_type is None:\n results['gt_bboxes'] = np.array(\n gt_bboxes, dtype=np.float32).reshape((-1, 4))\n else:\n _, box_type_cls = get_box_type(self.box_type)\n results['gt_bboxes'] = box_type_cls(gt_bboxes, dtype=torch.float32)\n results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n\n def _load_labels(self, results: dict) -> None:\n \"\"\"Private function to load label annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded label annotations.\n \"\"\"\n gt_bboxes_labels = []\n for instance in results.get('instances', []):\n gt_bboxes_labels.append(instance['bbox_label'])\n # TODO: Inconsistent with mmcv, consider how to deal with it later.\n results['gt_bboxes_labels'] = np.array(\n gt_bboxes_labels, dtype=np.int64)\n\n def _poly2mask(self, mask_ann: Union[list, dict], img_h: int,\n img_w: int) -> np.ndarray:\n \"\"\"Private function to convert masks represented with polygon to\n bitmaps.\n\n Args:\n mask_ann (list | dict): Polygon mask annotation input.\n img_h (int): The height of output mask.\n img_w (int): The width of output mask.\n\n Returns:\n np.ndarray: The decode bitmap mask of shape (img_h, img_w).\n \"\"\"\n\n if isinstance(mask_ann, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n rle = maskUtils.merge(rles)\n elif isinstance(mask_ann['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n else:\n # rle\n rle = mask_ann\n mask = maskUtils.decode(rle)\n return mask\n\n def _process_masks(self, results: dict) -> list:\n \"\"\"Process gt_masks and filter invalid polygons.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n list: Processed gt_masks.\n \"\"\"\n gt_masks = []\n gt_ignore_flags = []\n for instance in results.get('instances', []):\n gt_mask = instance['mask']\n # If the annotation of segmentation mask is invalid,\n # ignore the whole instance.\n if isinstance(gt_mask, list):\n gt_mask = [\n np.array(polygon) for polygon in gt_mask\n if len(polygon) % 
2 == 0 and len(polygon) >= 6\n ]\n if len(gt_mask) == 0:\n # ignore this instance and set gt_mask to a fake mask\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n elif not self.poly2mask:\n # `PolygonMasks` requires a ploygon of format List[np.array],\n # other formats are invalid.\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n elif isinstance(gt_mask, dict) and \\\n not (gt_mask.get('counts') is not None and\n gt_mask.get('size') is not None and\n isinstance(gt_mask['counts'], (list, str))):\n # if gt_mask is a dict, it should include `counts` and `size`,\n # so that `BitmapMasks` can uncompressed RLE\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n gt_masks.append(gt_mask)\n # re-process gt_ignore_flags\n gt_ignore_flags.append(instance['ignore_flag'])\n results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n return gt_masks\n\n def _load_masks(self, results: dict) -> None:\n \"\"\"Private function to load mask annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n \"\"\"\n h, w = results['ori_shape']\n gt_masks = self._process_masks(results)\n if self.poly2mask:\n gt_masks = BitmapMasks(\n [self._poly2mask(mask, h, w) for mask in gt_masks], h, w)\n else:\n # fake polygon masks will be ignored in `PackDetInputs`\n gt_masks = PolygonMasks([mask for mask in gt_masks], h, w)\n results['gt_masks'] = gt_masks\n\n def _load_seg_map(self, results: dict) -> None:\n \"\"\"Private function to load semantic segmentation annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmcv.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded semantic segmentation annotations.\n \"\"\"\n if results.get('seg_map_path', None) is None:\n return\n\n img_bytes = get(\n results['seg_map_path'], backend_args=self.backend_args)\n gt_semantic_seg = mmcv.imfrombytes(\n img_bytes, flag='unchanged',\n backend=self.imdecode_backend).squeeze()\n\n if self.reduce_zero_label:\n # avoid using underflow conversion\n gt_semantic_seg[gt_semantic_seg == 0] = self.ignore_index\n gt_semantic_seg = gt_semantic_seg - 1\n gt_semantic_seg[gt_semantic_seg == self.ignore_index -\n 1] = self.ignore_index\n\n # modify if custom classes\n if results.get('label_map', None) is not None:\n # Add deep copy to solve bug of repeatedly\n # replace `gt_semantic_seg`, which is reported in\n # https://github.com/open-mmlab/mmsegmentation/pull/1445/\n gt_semantic_seg_copy = gt_semantic_seg.copy()\n for old_id, new_id in results['label_map'].items():\n gt_semantic_seg[gt_semantic_seg_copy == old_id] = new_id\n results['gt_seg_map'] = gt_semantic_seg\n results['ignore_index'] = self.ignore_index\n\n def transform(self, results: dict) -> dict:\n \"\"\"Function to load multiple types annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded bounding box, label and\n semantic segmentation.\n \"\"\"\n\n if self.with_bbox:\n self._load_bboxes(results)\n if self.with_label:\n self._load_labels(results)\n if self.with_mask:\n self._load_masks(results)\n if self.with_seg:\n self._load_seg_map(results)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(with_bbox={self.with_bbox}, '\n repr_str += f'with_label={self.with_label}, '\n repr_str += f'with_mask={self.with_mask}, '\n repr_str += f'with_seg={self.with_seg}, '\n repr_str += f'poly2mask={self.poly2mask}, '\n repr_str += f\"imdecode_backend='{self.imdecode_backend}', \"\n repr_str += 
f'backend_args={self.backend_args})'\n return repr_str" }, { "identifier": "CachedMixUp", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class CachedMixUp(BaseTransform):\n \"\"\"Cached mixup data augmentation.\n\n .. code:: text\n\n mixup transform\n +------------------------------+\n | mixup image | |\n | +--------|--------+ |\n | | | | |\n |---------------+ | |\n | | | |\n | | image | |\n | | | |\n | | | |\n | |-----------------+ |\n | pad |\n +------------------------------+\n\n The cached mixup transform steps are as follows:\n\n 1. Append the results from the last transform into the cache.\n 2. Another random image is picked from the cache and embedded in\n the top left patch(after padding and resizing)\n 3. The target of mixup transform is the weighted average of mixup\n image and origin image.\n\n Required Keys:\n\n - img\n - gt_bboxes (np.float32) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_ignore_flags (bool) (optional)\n - mix_results (List[dict])\n\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_ignore_flags (optional)\n\n\n Args:\n img_scale (Sequence[int]): Image output size after mixup pipeline.\n The shape order should be (width, height). Defaults to (640, 640).\n ratio_range (Sequence[float]): Scale ratio of mixup image.\n Defaults to (0.5, 1.5).\n flip_ratio (float): Horizontal flip ratio of mixup image.\n Defaults to 0.5.\n pad_val (int): Pad value. Defaults to 114.\n max_iters (int): The maximum number of iterations. If the number of\n iterations is greater than `max_iters`, but gt_bbox is still\n empty, then the iteration is terminated. Defaults to 15.\n bbox_clip_border (bool, optional): Whether to clip the objects outside\n the border of the image. In some dataset like MOT17, the gt bboxes\n are allowed to cross the border of images. Therefore, we don't\n need to clip the gt bboxes in these cases. Defaults to True.\n max_cached_images (int): The maximum length of the cache. The larger\n the cache, the stronger the randomness of this transform. As a\n rule of thumb, providing 10 caches for each image suffices for\n randomness. Defaults to 20.\n random_pop (bool): Whether to randomly pop a result from the cache\n when the cache is full. If set to False, use FIFO popping method.\n Defaults to True.\n prob (float): Probability of applying this transformation.\n Defaults to 1.0.\n \"\"\"\n\n def __init__(self,\n img_scale: Tuple[int, int] = (640, 640),\n ratio_range: Tuple[float, float] = (0.5, 1.5),\n flip_ratio: float = 0.5,\n pad_val: float = 114.0,\n max_iters: int = 15,\n bbox_clip_border: bool = True,\n max_cached_images: int = 20,\n random_pop: bool = True,\n prob: float = 1.0) -> None:\n assert isinstance(img_scale, tuple)\n assert max_cached_images >= 2, 'The length of cache must >= 2, ' \\\n f'but got {max_cached_images}.'\n assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. 
' \\\n f'got {prob}.'\n self.dynamic_scale = img_scale\n self.ratio_range = ratio_range\n self.flip_ratio = flip_ratio\n self.pad_val = pad_val\n self.max_iters = max_iters\n self.bbox_clip_border = bbox_clip_border\n self.results_cache = []\n\n self.max_cached_images = max_cached_images\n self.random_pop = random_pop\n self.prob = prob\n\n @cache_randomness\n def get_indexes(self, cache: list) -> int:\n \"\"\"Call function to collect indexes.\n\n Args:\n cache (list): The result cache.\n\n Returns:\n int: index.\n \"\"\"\n\n for i in range(self.max_iters):\n index = random.randint(0, len(cache) - 1)\n gt_bboxes_i = cache[index]['gt_bboxes']\n if len(gt_bboxes_i) != 0:\n break\n return index\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"MixUp transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n # cache and pop images\n self.results_cache.append(copy.deepcopy(results))\n if len(self.results_cache) > self.max_cached_images:\n if self.random_pop:\n index = random.randint(0, len(self.results_cache) - 1)\n else:\n index = 0\n self.results_cache.pop(index)\n\n if len(self.results_cache) <= 1:\n return results\n\n if random.uniform(0, 1) > self.prob:\n return results\n\n index = self.get_indexes(self.results_cache)\n retrieve_results = copy.deepcopy(self.results_cache[index])\n\n # TODO: refactor mixup to reuse these code.\n if retrieve_results['gt_bboxes'].shape[0] == 0:\n # empty bbox\n return results\n\n retrieve_img = retrieve_results['img']\n with_mask = True if 'gt_masks' in results else False\n\n jit_factor = random.uniform(*self.ratio_range)\n is_flip = random.uniform(0, 1) > self.flip_ratio\n\n if len(retrieve_img.shape) == 3:\n out_img = np.ones(\n (self.dynamic_scale[1], self.dynamic_scale[0], 3),\n dtype=retrieve_img.dtype) * self.pad_val\n else:\n out_img = np.ones(\n self.dynamic_scale[::-1],\n dtype=retrieve_img.dtype) * self.pad_val\n\n # 1. keep_ratio resize\n scale_ratio = min(self.dynamic_scale[1] / retrieve_img.shape[0],\n self.dynamic_scale[0] / retrieve_img.shape[1])\n retrieve_img = mmcv.imresize(\n retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),\n int(retrieve_img.shape[0] * scale_ratio)))\n\n # 2. paste\n out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img\n\n # 3. scale jit\n scale_ratio *= jit_factor\n out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),\n int(out_img.shape[0] * jit_factor)))\n\n # 4. flip\n if is_flip:\n out_img = out_img[:, ::-1, :]\n\n # 5. random crop\n ori_img = results['img']\n origin_h, origin_w = out_img.shape[:2]\n target_h, target_w = ori_img.shape[:2]\n padded_img = np.ones((max(origin_h, target_h), max(\n origin_w, target_w), 3)) * self.pad_val\n padded_img = padded_img.astype(np.uint8)\n padded_img[:origin_h, :origin_w] = out_img\n\n x_offset, y_offset = 0, 0\n if padded_img.shape[0] > target_h:\n y_offset = random.randint(0, padded_img.shape[0] - target_h)\n if padded_img.shape[1] > target_w:\n x_offset = random.randint(0, padded_img.shape[1] - target_w)\n padded_cropped_img = padded_img[y_offset:y_offset + target_h,\n x_offset:x_offset + target_w]\n\n # 6. 
adjust bbox\n retrieve_gt_bboxes = retrieve_results['gt_bboxes']\n retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio])\n if with_mask:\n retrieve_gt_masks = retrieve_results['gt_masks'].rescale(\n scale_ratio)\n\n if self.bbox_clip_border:\n retrieve_gt_bboxes.clip_([origin_h, origin_w])\n\n if is_flip:\n retrieve_gt_bboxes.flip_([origin_h, origin_w],\n direction='horizontal')\n if with_mask:\n retrieve_gt_masks = retrieve_gt_masks.flip()\n\n # 7. filter\n cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone()\n cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset])\n if with_mask:\n retrieve_gt_masks = retrieve_gt_masks.translate(\n out_shape=(target_h, target_w),\n offset=-x_offset,\n direction='horizontal')\n retrieve_gt_masks = retrieve_gt_masks.translate(\n out_shape=(target_h, target_w),\n offset=-y_offset,\n direction='vertical')\n\n if self.bbox_clip_border:\n cp_retrieve_gt_bboxes.clip_([target_h, target_w])\n\n # 8. mix up\n ori_img = ori_img.astype(np.float32)\n mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32)\n\n retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels']\n retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags']\n\n mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat(\n (results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0)\n mixup_gt_bboxes_labels = np.concatenate(\n (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0)\n mixup_gt_ignore_flags = np.concatenate(\n (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0)\n if with_mask:\n mixup_gt_masks = retrieve_gt_masks.cat(\n [results['gt_masks'], retrieve_gt_masks])\n\n # remove outside bbox\n inside_inds = mixup_gt_bboxes.is_inside([target_h, target_w]).numpy()\n mixup_gt_bboxes = mixup_gt_bboxes[inside_inds]\n mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds]\n mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds]\n if with_mask:\n mixup_gt_masks = mixup_gt_masks[inside_inds]\n\n results['img'] = mixup_img.astype(np.uint8)\n results['img_shape'] = mixup_img.shape[:2]\n results['gt_bboxes'] = mixup_gt_bboxes\n results['gt_bboxes_labels'] = mixup_gt_bboxes_labels\n results['gt_ignore_flags'] = mixup_gt_ignore_flags\n if with_mask:\n results['gt_masks'] = mixup_gt_masks\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(dynamic_scale={self.dynamic_scale}, '\n repr_str += f'ratio_range={self.ratio_range}, '\n repr_str += f'flip_ratio={self.flip_ratio}, '\n repr_str += f'pad_val={self.pad_val}, '\n repr_str += f'max_iters={self.max_iters}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border}, '\n repr_str += f'max_cached_images={self.max_cached_images}, '\n repr_str += f'random_pop={self.random_pop}, '\n repr_str += f'prob={self.prob})'\n return repr_str" }, { "identifier": "CachedMosaic", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class CachedMosaic(Mosaic):\n \"\"\"Cached mosaic augmentation.\n\n Cached mosaic transform will random select images from the cache\n and combine them into one output image.\n\n .. code:: text\n\n mosaic transform\n center_x\n +------------------------------+\n | pad | pad |\n | +-----------+ |\n | | | |\n | | image1 |--------+ |\n | | | | |\n | | | image2 | |\n center_y |----+-------------+-----------|\n | | cropped | |\n |pad | image3 | image4 |\n | | | |\n +----|-------------+-----------+\n | |\n +-------------+\n\n The cached mosaic transform steps are as follows:\n\n 1. Append the results from the last transform into the cache.\n 2. 
Choose the mosaic center as the intersections of 4 images\n 3. Get the left top image according to the index, and randomly\n sample another 3 images from the result cache.\n 4. Sub image will be cropped if image is larger than mosaic patch\n\n Required Keys:\n\n - img\n - gt_bboxes (np.float32) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_ignore_flags (bool) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_ignore_flags (optional)\n\n Args:\n img_scale (Sequence[int]): Image size before mosaic pipeline of single\n image. The shape order should be (width, height).\n Defaults to (640, 640).\n center_ratio_range (Sequence[float]): Center ratio range of mosaic\n output. Defaults to (0.5, 1.5).\n bbox_clip_border (bool, optional): Whether to clip the objects outside\n the border of the image. In some dataset like MOT17, the gt bboxes\n are allowed to cross the border of images. Therefore, we don't\n need to clip the gt bboxes in these cases. Defaults to True.\n pad_val (int): Pad value. Defaults to 114.\n prob (float): Probability of applying this transformation.\n Defaults to 1.0.\n max_cached_images (int): The maximum length of the cache. The larger\n the cache, the stronger the randomness of this transform. As a\n rule of thumb, providing 10 caches for each image suffices for\n randomness. Defaults to 40.\n random_pop (bool): Whether to randomly pop a result from the cache\n when the cache is full. If set to False, use FIFO popping method.\n Defaults to True.\n \"\"\"\n\n def __init__(self,\n *args,\n max_cached_images: int = 40,\n random_pop: bool = True,\n **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.results_cache = []\n self.random_pop = random_pop\n assert max_cached_images >= 4, 'The length of cache must >= 4, ' \\\n f'but got {max_cached_images}.'\n self.max_cached_images = max_cached_images\n\n @cache_randomness\n def get_indexes(self, cache: list) -> list:\n \"\"\"Call function to collect indexes.\n\n Args:\n cache (list): The results cache.\n\n Returns:\n list: indexes.\n \"\"\"\n\n indexes = [random.randint(0, len(cache) - 1) for _ in range(3)]\n return indexes\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"Mosaic transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n # cache and pop images\n self.results_cache.append(copy.deepcopy(results))\n if len(self.results_cache) > self.max_cached_images:\n if self.random_pop:\n index = random.randint(0, len(self.results_cache) - 1)\n else:\n index = 0\n self.results_cache.pop(index)\n\n if len(self.results_cache) <= 4:\n return results\n\n if random.uniform(0, 1) > self.prob:\n return results\n indices = self.get_indexes(self.results_cache)\n mix_results = [copy.deepcopy(self.results_cache[i]) for i in indices]\n\n # TODO: refactor mosaic to reuse these code.\n mosaic_bboxes = []\n mosaic_bboxes_labels = []\n mosaic_ignore_flags = []\n mosaic_masks = []\n with_mask = True if 'gt_masks' in results else False\n\n if len(results['img'].shape) == 3:\n mosaic_img = np.full(\n (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2), 3),\n self.pad_val,\n dtype=results['img'].dtype)\n else:\n mosaic_img = np.full(\n (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2)),\n self.pad_val,\n dtype=results['img'].dtype)\n\n # mosaic center x, y\n center_x = int(\n random.uniform(*self.center_ratio_range) * self.img_scale[0])\n center_y = int(\n 
random.uniform(*self.center_ratio_range) * self.img_scale[1])\n center_position = (center_x, center_y)\n\n loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n for i, loc in enumerate(loc_strs):\n if loc == 'top_left':\n results_patch = copy.deepcopy(results)\n else:\n results_patch = copy.deepcopy(mix_results[i - 1])\n\n img_i = results_patch['img']\n h_i, w_i = img_i.shape[:2]\n # keep_ratio resize\n scale_ratio_i = min(self.img_scale[1] / h_i,\n self.img_scale[0] / w_i)\n img_i = mmcv.imresize(\n img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))\n\n # compute the combine parameters\n paste_coord, crop_coord = self._mosaic_combine(\n loc, center_position, img_i.shape[:2][::-1])\n x1_p, y1_p, x2_p, y2_p = paste_coord\n x1_c, y1_c, x2_c, y2_c = crop_coord\n\n # crop and paste image\n mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]\n\n # adjust coordinate\n gt_bboxes_i = results_patch['gt_bboxes']\n gt_bboxes_labels_i = results_patch['gt_bboxes_labels']\n gt_ignore_flags_i = results_patch['gt_ignore_flags']\n\n padw = x1_p - x1_c\n padh = y1_p - y1_c\n gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i])\n gt_bboxes_i.translate_([padw, padh])\n mosaic_bboxes.append(gt_bboxes_i)\n mosaic_bboxes_labels.append(gt_bboxes_labels_i)\n mosaic_ignore_flags.append(gt_ignore_flags_i)\n if with_mask and results_patch.get('gt_masks', None) is not None:\n gt_masks_i = results_patch['gt_masks']\n gt_masks_i = gt_masks_i.rescale(float(scale_ratio_i))\n gt_masks_i = gt_masks_i.translate(\n out_shape=(int(self.img_scale[0] * 2),\n int(self.img_scale[1] * 2)),\n offset=padw,\n direction='horizontal')\n gt_masks_i = gt_masks_i.translate(\n out_shape=(int(self.img_scale[0] * 2),\n int(self.img_scale[1] * 2)),\n offset=padh,\n direction='vertical')\n mosaic_masks.append(gt_masks_i)\n\n mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0)\n mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0)\n mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0)\n\n if self.bbox_clip_border:\n mosaic_bboxes.clip_([2 * self.img_scale[1], 2 * self.img_scale[0]])\n # remove outside bboxes\n inside_inds = mosaic_bboxes.is_inside(\n [2 * self.img_scale[1], 2 * self.img_scale[0]]).numpy()\n mosaic_bboxes = mosaic_bboxes[inside_inds]\n mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds]\n mosaic_ignore_flags = mosaic_ignore_flags[inside_inds]\n\n results['img'] = mosaic_img\n results['img_shape'] = mosaic_img.shape[:2]\n results['gt_bboxes'] = mosaic_bboxes\n results['gt_bboxes_labels'] = mosaic_bboxes_labels\n results['gt_ignore_flags'] = mosaic_ignore_flags\n\n if with_mask:\n mosaic_masks = mosaic_masks[0].cat(mosaic_masks)\n results['gt_masks'] = mosaic_masks[inside_inds]\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(img_scale={self.img_scale}, '\n repr_str += f'center_ratio_range={self.center_ratio_range}, '\n repr_str += f'pad_val={self.pad_val}, '\n repr_str += f'prob={self.prob}, '\n repr_str += f'max_cached_images={self.max_cached_images}, '\n repr_str += f'random_pop={self.random_pop})'\n return repr_str" }, { "identifier": "Pad", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class Pad(MMCV_Pad):\n \"\"\"Pad the image & segmentation map.\n\n There are three padding modes: (1) pad to a fixed size and (2) pad to the\n minimum size that is divisible by some number. and (3)pad to square. 
Also,\n pad to square and pad to the minimum size can be used as the same time.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_masks\n - gt_seg_map\n\n Added Keys:\n\n - pad_shape\n - pad_fixed_size\n - pad_size_divisor\n\n Args:\n size (tuple, optional): Fixed padding size.\n Expected padding shape (width, height). Defaults to None.\n size_divisor (int, optional): The divisor of padded size. Defaults to\n None.\n pad_to_square (bool): Whether to pad the image into a square.\n Currently only used for YOLOX. Defaults to False.\n pad_val (Number | dict[str, Number], optional) - Padding value for if\n the pad_mode is \"constant\". If it is a single number, the value\n to pad the image is the number and to pad the semantic\n segmentation map is 255. If it is a dict, it should have the\n following keys:\n\n - img: The value to pad the image.\n - seg: The value to pad the semantic segmentation map.\n Defaults to dict(img=0, seg=255).\n padding_mode (str): Type of padding. Should be: constant, edge,\n reflect or symmetric. Defaults to 'constant'.\n\n - constant: pads with a constant value, this value is specified\n with pad_val.\n - edge: pads with the last value at the edge of the image.\n - reflect: pads with reflection of image without repeating the last\n value on the edge. For example, padding [1, 2, 3, 4] with 2\n elements on both sides in reflect mode will result in\n [3, 2, 1, 2, 3, 4, 3, 2].\n - symmetric: pads with reflection of image repeating the last value\n on the edge. For example, padding [1, 2, 3, 4] with 2 elements on\n both sides in symmetric mode will result in\n [2, 1, 1, 2, 3, 4, 4, 3]\n \"\"\"\n\n def _pad_masks(self, results: dict) -> None:\n \"\"\"Pad masks according to ``results['pad_shape']``.\"\"\"\n if results.get('gt_masks', None) is not None:\n pad_val = self.pad_val.get('masks', 0)\n pad_shape = results['pad_shape'][:2]\n results['gt_masks'] = results['gt_masks'].pad(\n pad_shape, pad_val=pad_val)\n\n def transform(self, results: dict) -> dict:\n \"\"\"Call function to pad images, masks, semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n self._pad_img(results)\n self._pad_seg(results)\n self._pad_masks(results)\n return results" }, { "identifier": "RandomCrop", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class RandomCrop(BaseTransform):\n \"\"\"Random crop the image & bboxes & masks.\n\n The absolute ``crop_size`` is sampled based on ``crop_type`` and\n ``image_size``, then the cropped results are generated.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_ignore_flags (bool) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_masks (optional)\n - gt_ignore_flags (optional)\n - gt_seg_map (optional)\n - gt_instances_ids (options, only used in MOT/VIS)\n\n Added Keys:\n\n - homography_matrix\n\n Args:\n crop_size (tuple): The relative ratio or absolute pixels of\n (width, height).\n crop_type (str, optional): One of \"relative_range\", \"relative\",\n \"absolute\", \"absolute_range\". 
\"relative\" randomly crops\n (h * crop_size[0], w * crop_size[1]) part from an input of size\n (h, w). \"relative_range\" uniformly samples relative crop size from\n range [crop_size[0], 1] and [crop_size[1], 1] for height and width\n respectively. \"absolute\" crops from an input with absolute size\n (crop_size[0], crop_size[1]). \"absolute_range\" uniformly samples\n crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w\n in range [crop_size[0], min(w, crop_size[1])].\n Defaults to \"absolute\".\n allow_negative_crop (bool, optional): Whether to allow a crop that does\n not contain any bbox area. Defaults to False.\n recompute_bbox (bool, optional): Whether to re-compute the boxes based\n on cropped instance masks. Defaults to False.\n bbox_clip_border (bool, optional): Whether clip the objects outside\n the border of the image. Defaults to True.\n\n Note:\n - If the image is smaller than the absolute crop size, return the\n original image.\n - The keys for bboxes, labels and masks must be aligned. That is,\n ``gt_bboxes`` corresponds to ``gt_labels`` and ``gt_masks``, and\n ``gt_bboxes_ignore`` corresponds to ``gt_labels_ignore`` and\n ``gt_masks_ignore``.\n - If the crop does not contain any gt-bbox region and\n ``allow_negative_crop`` is set to False, skip this image.\n \"\"\"\n\n def __init__(self,\n crop_size: tuple,\n crop_type: str = 'absolute',\n allow_negative_crop: bool = False,\n recompute_bbox: bool = False,\n bbox_clip_border: bool = True) -> None:\n if crop_type not in [\n 'relative_range', 'relative', 'absolute', 'absolute_range'\n ]:\n raise ValueError(f'Invalid crop_type {crop_type}.')\n if crop_type in ['absolute', 'absolute_range']:\n assert crop_size[0] > 0 and crop_size[1] > 0\n assert isinstance(crop_size[0], int) and isinstance(\n crop_size[1], int)\n if crop_type == 'absolute_range':\n assert crop_size[0] <= crop_size[1]\n else:\n assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1\n self.crop_size = crop_size\n self.crop_type = crop_type\n self.allow_negative_crop = allow_negative_crop\n self.bbox_clip_border = bbox_clip_border\n self.recompute_bbox = recompute_bbox\n\n def _crop_data(self, results: dict, crop_size: Tuple[int, int],\n allow_negative_crop: bool) -> Union[dict, None]:\n \"\"\"Function to randomly crop images, bounding boxes, masks, semantic\n segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n crop_size (Tuple[int, int]): Expected absolute size after\n cropping, (h, w).\n allow_negative_crop (bool): Whether to allow a crop that does not\n contain any bbox area.\n\n Returns:\n results (Union[dict, None]): Randomly cropped results, 'img_shape'\n key in result dict is updated according to crop size. 
None will\n be returned when there is no valid bbox after cropping.\n \"\"\"\n assert crop_size[0] > 0 and crop_size[1] > 0\n img = results['img']\n margin_h = max(img.shape[0] - crop_size[0], 0)\n margin_w = max(img.shape[1] - crop_size[1], 0)\n offset_h, offset_w = self._rand_offset((margin_h, margin_w))\n crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]\n crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]\n\n # Record the homography matrix for the RandomCrop\n homography_matrix = np.array(\n [[1, 0, -offset_w], [0, 1, -offset_h], [0, 0, 1]],\n dtype=np.float32)\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n # crop the image\n img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]\n img_shape = img.shape\n results['img'] = img\n results['img_shape'] = img_shape[:2]\n\n # crop bboxes accordingly and clip to the image boundary\n if results.get('gt_bboxes', None) is not None:\n bboxes = results['gt_bboxes']\n bboxes.translate_([-offset_w, -offset_h])\n if self.bbox_clip_border:\n bboxes.clip_(img_shape[:2])\n valid_inds = bboxes.is_inside(img_shape[:2]).numpy()\n # If the crop does not contain any gt-bbox area and\n # allow_negative_crop is False, skip this image.\n if (not valid_inds.any() and not allow_negative_crop):\n return None\n\n results['gt_bboxes'] = bboxes[valid_inds]\n\n if results.get('gt_ignore_flags', None) is not None:\n results['gt_ignore_flags'] = \\\n results['gt_ignore_flags'][valid_inds]\n\n if results.get('gt_bboxes_labels', None) is not None:\n results['gt_bboxes_labels'] = \\\n results['gt_bboxes_labels'][valid_inds]\n\n if results.get('gt_masks', None) is not None:\n results['gt_masks'] = results['gt_masks'][\n valid_inds.nonzero()[0]].crop(\n np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))\n if self.recompute_bbox:\n results['gt_bboxes'] = results['gt_masks'].get_bboxes(\n type(results['gt_bboxes']))\n\n # We should remove the instance ids corresponding to invalid boxes.\n if results.get('gt_instances_ids', None) is not None:\n results['gt_instances_ids'] = \\\n results['gt_instances_ids'][valid_inds]\n\n # crop semantic seg\n if results.get('gt_seg_map', None) is not None:\n results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2,\n crop_x1:crop_x2]\n\n return results\n\n @cache_randomness\n def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"Randomly generate crop offset.\n\n Args:\n margin (Tuple[int, int]): The upper bound for the offset generated\n randomly.\n\n Returns:\n Tuple[int, int]: The random offset for the crop.\n \"\"\"\n margin_h, margin_w = margin\n offset_h = np.random.randint(0, margin_h + 1)\n offset_w = np.random.randint(0, margin_w + 1)\n\n return offset_h, offset_w\n\n @cache_randomness\n def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"Randomly generates the absolute crop size based on `crop_type` and\n `image_size`.\n\n Args:\n image_size (Tuple[int, int]): (h, w).\n\n Returns:\n crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels.\n \"\"\"\n h, w = image_size\n if self.crop_type == 'absolute':\n return min(self.crop_size[1], h), min(self.crop_size[0], w)\n elif self.crop_type == 'absolute_range':\n crop_h = np.random.randint(\n min(h, self.crop_size[0]),\n min(h, self.crop_size[1]) + 1)\n crop_w = np.random.randint(\n min(w, self.crop_size[0]),\n min(w, self.crop_size[1]) + 1)\n return crop_h, crop_w\n elif 
self.crop_type == 'relative':\n crop_w, crop_h = self.crop_size\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n else:\n # 'relative_range'\n crop_size = np.asarray(self.crop_size, dtype=np.float32)\n crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n\n @autocast_box_type()\n def transform(self, results: dict) -> Union[dict, None]:\n \"\"\"Transform function to randomly crop images, bounding boxes, masks,\n semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n results (Union[dict, None]): Randomly cropped results, 'img_shape'\n key in result dict is updated according to crop size. None will\n be returned when there is no valid bbox after cropping.\n \"\"\"\n image_size = results['img'].shape[:2]\n crop_size = self._get_crop_size(image_size)\n results = self._crop_data(results, crop_size, self.allow_negative_crop)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(crop_size={self.crop_size}, '\n repr_str += f'crop_type={self.crop_type}, '\n repr_str += f'allow_negative_crop={self.allow_negative_crop}, '\n repr_str += f'recompute_bbox={self.recompute_bbox}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n return repr_str" }, { "identifier": "RandomFlip", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class RandomFlip(MMCV_RandomFlip):\n \"\"\"Flip the image & bbox & mask & segmentation map. Added or Updated keys:\n flip, flip_direction, img, gt_bboxes, and gt_seg_map. There are 3 flip\n modes:\n\n - ``prob`` is float, ``direction`` is string: the image will be\n ``direction``ly flipped with probability of ``prob`` .\n E.g., ``prob=0.5``, ``direction='horizontal'``,\n then image will be horizontally flipped with probability of 0.5.\n - ``prob`` is float, ``direction`` is list of string: the image will\n be ``direction[i]``ly flipped with probability of\n ``prob/len(direction)``.\n E.g., ``prob=0.5``, ``direction=['horizontal', 'vertical']``,\n then image will be horizontally flipped with probability of 0.25,\n vertically with probability of 0.25.\n - ``prob`` is list of float, ``direction`` is list of string:\n given ``len(prob) == len(direction)``, the image will\n be ``direction[i]``ly flipped with probability of ``prob[i]``.\n E.g., ``prob=[0.3, 0.5]``, ``direction=['horizontal',\n 'vertical']``, then image will be horizontally flipped with\n probability of 0.3, vertically with probability of 0.5.\n\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - gt_bboxes\n - gt_masks\n - gt_seg_map\n\n Added Keys:\n\n - flip\n - flip_direction\n - homography_matrix\n\n\n Args:\n prob (float | list[float], optional): The flipping probability.\n Defaults to None.\n direction(str | list[str]): The flipping direction. Options\n If input is a list, the length must equal ``prob``. Each\n element in ``prob`` indicates the flip probability of\n corresponding direction. 
Defaults to 'horizontal'.\n \"\"\"\n\n def _record_homography_matrix(self, results: dict) -> None:\n \"\"\"Record the homography matrix for the RandomFlip.\"\"\"\n cur_dir = results['flip_direction']\n h, w = results['img'].shape[:2]\n\n if cur_dir == 'horizontal':\n homography_matrix = np.array([[-1, 0, w], [0, 1, 0], [0, 0, 1]],\n dtype=np.float32)\n elif cur_dir == 'vertical':\n homography_matrix = np.array([[1, 0, 0], [0, -1, h], [0, 0, 1]],\n dtype=np.float32)\n elif cur_dir == 'diagonal':\n homography_matrix = np.array([[-1, 0, w], [0, -1, h], [0, 0, 1]],\n dtype=np.float32)\n else:\n homography_matrix = np.eye(3, dtype=np.float32)\n\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n @autocast_box_type()\n def _flip(self, results: dict) -> None:\n \"\"\"Flip images, bounding boxes, and semantic segmentation map.\"\"\"\n # flip image\n results['img'] = mmcv.imflip(\n results['img'], direction=results['flip_direction'])\n\n img_shape = results['img'].shape[:2]\n\n # flip bboxes\n if results.get('gt_bboxes', None) is not None:\n results['gt_bboxes'].flip_(img_shape, results['flip_direction'])\n\n # flip masks\n if results.get('gt_masks', None) is not None:\n results['gt_masks'] = results['gt_masks'].flip(\n results['flip_direction'])\n\n # flip segs\n if results.get('gt_seg_map', None) is not None:\n results['gt_seg_map'] = mmcv.imflip(\n results['gt_seg_map'], direction=results['flip_direction'])\n\n # record homography matrix for flip\n self._record_homography_matrix(results)" }, { "identifier": "Resize", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class Resize(MMCV_Resize):\n \"\"\"Resize images & bbox & seg.\n\n This transform resizes the input image according to ``scale`` or\n ``scale_factor``. Bboxes, masks, and seg map are then resized\n with the same scale factor.\n if ``scale`` and ``scale_factor`` are both set, it will use ``scale`` to\n resize.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes\n - gt_masks\n - gt_seg_map\n\n\n Added Keys:\n\n - scale\n - scale_factor\n - keep_ratio\n - homography_matrix\n\n Args:\n scale (int or tuple): Images scales for resizing. Defaults to None\n scale_factor (float or tuple[float]): Scale factors for resizing.\n Defaults to None.\n keep_ratio (bool): Whether to keep the aspect ratio when resizing the\n image. Defaults to False.\n clip_object_border (bool): Whether to clip the objects\n outside the border of the image. In some dataset like MOT17, the gt\n bboxes are allowed to cross the border of images. Therefore, we\n don't need to clip the gt bboxes in these cases. Defaults to True.\n backend (str): Image resize backend, choices are 'cv2' and 'pillow'.\n These two backends generates slightly different results. Defaults\n to 'cv2'.\n interpolation (str): Interpolation method, accepted values are\n \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n backend, \"nearest\", \"bilinear\" for 'pillow' backend. 
Defaults\n to 'bilinear'.\n \"\"\"\n\n def _resize_masks(self, results: dict) -> None:\n \"\"\"Resize masks with ``results['scale']``\"\"\"\n if results.get('gt_masks', None) is not None:\n if self.keep_ratio:\n results['gt_masks'] = results['gt_masks'].rescale(\n results['scale'])\n else:\n results['gt_masks'] = results['gt_masks'].resize(\n results['img_shape'])\n\n def _resize_bboxes(self, results: dict) -> None:\n \"\"\"Resize bounding boxes with ``results['scale_factor']``.\"\"\"\n if results.get('gt_bboxes', None) is not None:\n results['gt_bboxes'].rescale_(results['scale_factor'])\n if self.clip_object_border:\n results['gt_bboxes'].clip_(results['img_shape'])\n\n def _record_homography_matrix(self, results: dict) -> None:\n \"\"\"Record the homography matrix for the Resize.\"\"\"\n w_scale, h_scale = results['scale_factor']\n homography_matrix = np.array(\n [[w_scale, 0, 0], [0, h_scale, 0], [0, 0, 1]], dtype=np.float32)\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"Transform function to resize images, bounding boxes and semantic\n segmentation map.\n\n Args:\n results (dict): Result dict from loading pipeline.\n Returns:\n dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map',\n 'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys\n are updated in result dict.\n \"\"\"\n if self.scale:\n results['scale'] = self.scale\n else:\n img_shape = results['img'].shape[:2]\n results['scale'] = _scale_size(img_shape[::-1], self.scale_factor)\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n self._record_homography_matrix(results)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(scale={self.scale}, '\n repr_str += f'scale_factor={self.scale_factor}, '\n repr_str += f'keep_ratio={self.keep_ratio}, '\n repr_str += f'clip_object_border={self.clip_object_border}), '\n repr_str += f'backend={self.backend}), '\n repr_str += f'interpolation={self.interpolation})'\n return repr_str" }, { "identifier": "YOLOXHSVRandomAug", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class YOLOXHSVRandomAug(BaseTransform):\n \"\"\"Apply HSV augmentation to image sequentially. It is referenced from\n https://github.com/Megvii-\n BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21.\n\n Required Keys:\n\n - img\n\n Modified Keys:\n\n - img\n\n Args:\n hue_delta (int): delta of hue. Defaults to 5.\n saturation_delta (int): delta of saturation. Defaults to 30.\n value_delta (int): delat of value. 
Defaults to 30.\n \"\"\"\n\n def __init__(self,\n hue_delta: int = 5,\n saturation_delta: int = 30,\n value_delta: int = 30) -> None:\n self.hue_delta = hue_delta\n self.saturation_delta = saturation_delta\n self.value_delta = value_delta\n\n @cache_randomness\n def _get_hsv_gains(self):\n hsv_gains = np.random.uniform(-1, 1, 3) * [\n self.hue_delta, self.saturation_delta, self.value_delta\n ]\n # random selection of h, s, v\n hsv_gains *= np.random.randint(0, 2, 3)\n # prevent overflow\n hsv_gains = hsv_gains.astype(np.int16)\n return hsv_gains\n\n def transform(self, results: dict) -> dict:\n img = results['img']\n hsv_gains = self._get_hsv_gains()\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16)\n\n img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180\n img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255)\n img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255)\n cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img)\n\n results['img'] = img\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(hue_delta={self.hue_delta}, '\n repr_str += f'saturation_delta={self.saturation_delta}, '\n repr_str += f'value_delta={self.value_delta})'\n return repr_str" } ]
from mmengine.config import read_base from .rtmdet_s_8xb32_300e_coco import * from mmcv.transforms.loading import LoadImageFromFile from mmcv.transforms.processing import RandomResize from mmdet.datasets.transforms.formatting import PackDetInputs from mmdet.datasets.transforms.loading import LoadAnnotations from mmdet.datasets.transforms.transforms import (CachedMixUp, CachedMosaic, Pad, RandomCrop, RandomFlip, Resize, YOLOXHSVRandomAug)
16552
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.167, widen_factor=0.375, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1), bbox_head=dict(in_channels=96, feat_channels=96, exp_on_reg=False))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict(type=LoadAnnotations, with_bbox=True), dict( type=CachedMosaic, img_scale=(640, 640), pad_val=114.0, max_cached_images=20, random_pop=False), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0), resize_type=Resize, keep_ratio=True), dict(type=RandomCrop, crop_size=(640, 640)), dict(type=YOLOXHSVRandomAug), dict(type=RandomFlip, prob=0.5), dict(type=Pad, size=(640, 640), pad_val=dict(img=(114, 114, 114))), dict( type=CachedMixUp, img_scale=(640, 640), ratio_range=(1.0, 1.0), max_cached_images=10, random_pop=False, pad_val=(114, 114, 114), prob=0.5),
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.167, widen_factor=0.375, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1), bbox_head=dict(in_channels=96, feat_channels=96, exp_on_reg=False))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict(type=LoadAnnotations, with_bbox=True), dict( type=CachedMosaic, img_scale=(640, 640), pad_val=114.0, max_cached_images=20, random_pop=False), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0), resize_type=Resize, keep_ratio=True), dict(type=RandomCrop, crop_size=(640, 640)), dict(type=YOLOXHSVRandomAug), dict(type=RandomFlip, prob=0.5), dict(type=Pad, size=(640, 640), pad_val=dict(img=(114, 114, 114))), dict( type=CachedMixUp, img_scale=(640, 640), ratio_range=(1.0, 1.0), max_cached_images=10, random_pop=False, pad_val=(114, 114, 114), prob=0.5),
dict(type=PackDetInputs)
0
2023-12-23 08:36:47+00:00
24k
see2023/Bert-VITS2-ext
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n v_model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 384)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n 
audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, en_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n\n return (phones, spec, wav, sid, tone, language, bert, ja_bert, en_bert)\n\n def get_audio(self, filename):\n audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)\n '''\n # from https://github.com/YYuX-1145/Bert-VITS2-Integration-package\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n '''\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n if config.train_ms_config.spec_cache:\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert_ori = torch.load(bert_path)\n assert bert_ori.shape[-1] == len(phone)\n except Exception as e:\n logger.warning(\"Bert load Failed\")\n logger.warning(e)\n\n if language_str == \"ZH\":\n bert = bert_ori\n ja_bert = torch.randn(1024, len(phone))\n en_bert = torch.randn(1024, len(phone))\n elif language_str == \"JP\":\n bert = torch.randn(1024, len(phone))\n ja_bert = bert_ori\n en_bert = torch.randn(1024, len(phone))\n elif language_str == \"EN\":\n bert = torch.randn(1024, len(phone))\n ja_bert = torch.randn(1024, len(phone))\n en_bert = bert_ori\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, en_bert, phone, tone, language\n\n def get_sid(self, 
sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n en_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n en_bert_padded.zero_()\n\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n en_bert = row[8]\n en_bert_padded[i, :, : en_bert.size(1)] = en_bert\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n en_bert_padded,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. 
length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "AudioVisemesLoader", "path": "data_utils.py", "snippet": "class AudioVisemesLoader(torch.utils.data.Dataset):\n \"\"\"\n loads audio, visemes torch variable pairs from visemes list file .\n file is like: \n ./records/date_time.z.npy|./records/date_time.npy\n \"\"\"\n \n def __init__(self, audio_visemes_list_file, hparams):\n 
self.audio_visemes_list_items = load_filepaths_and_text(audio_visemes_list_file)\n print('audio_visemes_list_items: ', len(self.audio_visemes_list_items))\n random.seed(1234)\n random.shuffle(self.audio_visemes_list_items)\n self.max_visemes_len = 1210\n self.min_visemes_len = 1190\n self._filter()\n\n\n def _filter(self):\n # check if the file exists, and can parse as torch tensor\n audio_visemes_list_items_new = []\n for audio_file, visemes_file in self.audio_visemes_list_items:\n if os.path.exists(audio_file) and os.path.exists(visemes_file):\n # check using torch.load\n try:\n audio = torch.load(audio_file)\n visemes = np.load(visemes_file)\n if visemes.shape[0] < self.min_visemes_len:\n print('drop this data: --------- visemes.shape[0] < self.min_visemes_len: ', visemes.shape[0], visemes_file)\n continue\n audio_visemes_list_items_new.append([audio_file, visemes_file])\n except Exception as e:\n print('error: ', audio_file, visemes_file)\n print(e)\n self.audio_visemes_list_items = audio_visemes_list_items_new\n print('audio_visemes_list_items after filter: ', len(self.audio_visemes_list_items))\n\n def __getitem__(self, index):\n # read these two torch.tensor\n audio_file, visemes_file = self.audio_visemes_list_items[index]\n audio_z = torch.load(audio_file).squeeze(0).detach()\n # [192, seq_len(1722)]\n\n visemes = np.load(visemes_file)\n visemes = torch.from_numpy(visemes)\n #[seq_len(1194), 61]\n visemes = visemes.transpose(0, 1)\n #[61, seq_len(1194)]\n if visemes.shape[1] > self.max_visemes_len:\n # cut the extra part\n # print('__getitem__ 1 cut visemes from ', visemes.shape[0], ' to ', self.max_visemes_len, 'file: ', visemes_file)\n visemes = visemes[:, :self.max_visemes_len]\n elif visemes.shape[1] < self.max_visemes_len:\n # padding to max_visemes_len with last frame\n # print('__getitem__ 2 padding visemes from ', visemes.shape[0], ' to ', self.max_visemes_len, 'file: ', visemes_file)\n # last_frame = visemes[-1]\n # visemes = np.concatenate([visemes, np.tile(last_frame, (self.max_visemes_len - visemes.shape[0], 1))], axis=0)\n # visemes = torch.from_numpy(visemes)\n pass\n\n visemes_offset = 0.08 # 将visemes延迟n s\n visemes_offset_frames = int(visemes_offset * const_map.ARKIT_FPS)\n visemes = visemes[:, visemes_offset_frames:]\n\n audio_z_offset = 0.0\n audio_z_offset_frames = int(audio_z_offset * const_map.Z_FPS)\n audio_z = audio_z[:, audio_z_offset_frames:]\n\n # 获取二者的时长,将过长的一方多的部分丢弃\n visemes_duration = visemes.shape[1] / const_map.ARKIT_FPS\n audio_z_duration = audio_z.shape[1] / const_map.Z_FPS\n if visemes_duration > audio_z_duration:\n visemes = visemes[:, :int(audio_z_duration * const_map.ARKIT_FPS)]\n elif visemes_duration < audio_z_duration:\n audio_z = audio_z[:, :int(visemes_duration * const_map.Z_FPS)]\n\n\n # print('__getitem__ 3 audio.shape: ', audio.shape, 'visemes.shape: ', visemes.shape,'file: ', visemes_file)\n return audio_z, visemes\n\n def __len__(self):\n return len(self.audio_visemes_list_items)" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=4,\n 
flow_share_parameter=False,\n use_transformer_flow=True,\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(\n self,\n x,\n x_lengths,\n y,\n y_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * 
s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n logw_sdp = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=1.0)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n l_length_sdp += torch.sum((logw_sdp - logw_) ** 2, [1, 2]) / torch.sum(x_mask)\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_, logw_sdp),\n g,\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n\n def get_post_enc_dec(self):\n return self.enc_q, self.dec" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = 
d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.LSTM = nn.LSTM(\n 2 * filter_channels, filter_channels, batch_first=True, bidirectional=True\n )\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(\n nn.Linear(2 * filter_channels, 1), nn.Sigmoid()\n )\n\n def forward_probability(self, x, dur):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = x.transpose(1, 2)\n x, _ = self.LSTM(x)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, dur)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "WavLMDiscriminator", "path": "models.py", "snippet": "class WavLMDiscriminator(nn.Module):\n \"\"\"docstring for Discriminator.\"\"\"\n\n def __init__(\n self, slm_hidden=768, slm_layers=13, initial_channel=64, use_spectral_norm=False\n ):\n super(WavLMDiscriminator, self).__init__()\n norm_f = weight_norm if use_spectral_norm == False else spectral_norm\n self.pre = norm_f(\n Conv1d(slm_hidden * slm_layers, initial_channel, 1, 1, padding=0)\n )\n\n self.convs = nn.ModuleList(\n [\n norm_f(\n nn.Conv1d(\n initial_channel, initial_channel * 2, kernel_size=5, padding=2\n )\n ),\n norm_f(\n nn.Conv1d(\n initial_channel * 2,\n initial_channel * 4,\n kernel_size=5,\n padding=2,\n )\n ),\n norm_f(\n nn.Conv1d(initial_channel * 4, initial_channel * 4, 5, 1, padding=2)\n ),\n ]\n )\n\n self.conv_post = norm_f(Conv1d(initial_channel * 4, 1, 3, 1, padding=1))\n\n def forward(self, x):\n x = self.pre(x)\n\n fmap = []\n for l in self.convs:\n x = l(x)\n x = F.leaky_relu(x, modules.LRELU_SLOPE)\n fmap.append(x)\n x = self.conv_post(x)\n x = torch.flatten(x, 1, -1)\n\n return x" }, { "identifier": "VisemesNet", "path": "models.py", "snippet": "class VisemesNet(nn.Module):\n def active(self, x):\n # active_fun: 0: null, 1: tanh, 2: relu, 3: LeakyReLU\n if self.active_fun == 1:\n return torch.tanh(x)\n elif self.active_fun == 2:\n return torch.relu(x)\n elif self.active_fun == 3:\n return self.leakyReLU(x)\n else:\n return x\n\n def __init__(self, hidden_channels, lstm_bidirectional=True, active_fun = 3, enable_conv=True, \n use_transformer = False, 
enable_dropout=True):\n super(VisemesNet, self).__init__()\n self.lstm_bidirectional = lstm_bidirectional\n self.lstm_directions = 2 if lstm_bidirectional else 1\n self.use_transformer = use_transformer\n self.enable_dropout = enable_dropout\n if active_fun == 3:\n self.leakyReLU = nn.LeakyReLU(negative_slope=0.01)\n if use_transformer:\n num_heads=8\n num_layers=3\n dim_feedforward=512\n dropout=0.1\n activation=\"relu\"\n self.transformer_encoder_layer = nn.TransformerEncoderLayer(\n d_model=hidden_channels, \n nhead=num_heads,\n dim_feedforward=dim_feedforward,\n dropout=dropout,\n activation=activation,\n batch_first=True\n )\n self.transformer_encoder = nn.TransformerEncoder(self.transformer_encoder_layer, num_layers=num_layers)\n else:\n self.lstm = nn.LSTM(input_size=hidden_channels, hidden_size=128, num_layers=3, batch_first=True, bidirectional=lstm_bidirectional)\n if use_transformer:\n self.fc1 = nn.Linear(hidden_channels, 96)\n else:\n self.fc1 = nn.Linear(128 * self.lstm_directions, 96)\n self.fc2 = nn.Linear(96, 61)\n dropout_rate = 0.5\n if self.enable_dropout:\n self.dropout = nn.Dropout(dropout_rate)\n conv_kernel_pre = 15\n conv_kernel_post = 11\n self.conv1d_pre = nn.Conv1d(in_channels=hidden_channels, out_channels=hidden_channels, kernel_size=conv_kernel_pre, stride=1, padding=conv_kernel_pre//2)\n self.conv1d_post = nn.Conv1d(in_channels=61, out_channels=61, kernel_size=conv_kernel_post, stride=1, padding=conv_kernel_post//2)\n self.enable_conv = enable_conv\n self.active_fun = active_fun\n\n def forward(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.use_transformer:\n return self.forward_transformer(x, y)\n else:\n return self.forward_lstm(x, y)\n\n def forward_transformer(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n # batch_first: True (batch, seq, feature); False (seq, batch, feature).\n x = x.transpose(1, 2)\n\n expressions = self.transformer_encoder(x)\n \n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n # expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n\n return expressions \n\n def forward_lstm(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n x = x.transpose(1, 2)\n # x [batch_size, seq_len, hidden_channels]\n expressions = None\n expressions, _ = self.lstm(x)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n return expressions\n \n def init_weights(self):\n # 初始化权重\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.LSTM):\n for name, param in m.named_parameters():\n if 'weight_ih' in name:\n nn.init.xavier_uniform_(param.data)\n elif 'weight_hh' in name:\n nn.init.orthogonal_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.BatchNorm1d):\n 
nn.init.constant_(m.weight.data, 1)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Conv1d):\n nn.init.xavier_uniform_(m.weight.data)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.TransformerEncoderLayer):\n for name, param in m.named_parameters():\n if 'weight' in name:\n if param.dim() == 1:\n nn.init.normal_(param.data)\n else:\n nn.init.xavier_uniform_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.TransformerEncoder):\n for param in m.parameters():\n if param.dim() > 1:\n nn.init.xavier_uniform_(param.data)\n else:\n nn.init.constant_(param.data, 0)" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "WavLMLoss", "path": "losses.py", "snippet": "class WavLMLoss(torch.nn.Module):\n def __init__(self, model, wd, model_sr, slm_sr=16000):\n super(WavLMLoss, self).__init__()\n self.wavlm = AutoModel.from_pretrained(model)\n self.wd = wd\n self.resample = torchaudio.transforms.Resample(model_sr, slm_sr)\n self.wavlm.eval()\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n def forward(self, wav, y_rec):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16.squeeze(), output_hidden_states=True\n ).hidden_states\n\n floss = 0\n for er, eg in zip(wav_embeddings, y_rec_embeddings):\n floss += torch.mean(torch.abs(er - eg))\n\n return floss.mean()\n\n def generator(self, y_rec):\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16, output_hidden_states=True\n ).hidden_states\n y_rec_embeddings = (\n torch.stack(y_rec_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n y_df_hat_g = self.wd(y_rec_embeddings)\n loss_gen = torch.mean((1 - y_df_hat_g) ** 2)\n\n return loss_gen\n\n def discriminator(self, wav, y_rec):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n 
input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16, output_hidden_states=True\n ).hidden_states\n\n y_embeddings = (\n torch.stack(wav_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n y_rec_embeddings = (\n torch.stack(y_rec_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n\n y_d_rs = self.wd(y_embeddings)\n y_d_gs = self.wd(y_rec_embeddings)\n\n y_df_hat_r, y_df_hat_g = y_d_rs, y_d_gs\n\n r_loss = torch.mean((1 - y_df_hat_r) ** 2)\n g_loss = torch.mean((y_df_hat_g) ** 2)\n\n loss_disc_f = r_loss + g_loss\n\n return loss_disc_f.mean()\n\n def discriminator_forward(self, wav):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_embeddings = (\n torch.stack(wav_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n\n y_d_rs = self.wd(y_embeddings)\n\n return y_d_rs" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import platform
import os
import torch
import torch.distributed as dist
import logging
import argparse
import datetime
import gc
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from config import config
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
    AudioVisemesLoader,
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
    WavLMDiscriminator,
    VisemesNet,
)
from losses import (
    generator_loss,
    discriminator_loss,
    feature_loss,
    kl_loss,
    WavLMLoss,
)
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
15,404
attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_, logw_sdp), g, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1).float(), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( y_d_hat_r, y_d_hat_g ) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw_.detach(), logw.detach(), g.detach(), ) y_dur_hat_r_sdp, y_dur_hat_g_sdp = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw_.detach(), logw_sdp.detach(), g.detach(), ) y_dur_hat_r = y_dur_hat_r + y_dur_hat_r_sdp y_dur_hat_g = y_dur_hat_g + y_dur_hat_g_sdp with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): # TODO: I think need to mean using the mask, but for now, just mean all ( loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g, ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) # torch.nn.utils.clip_grad_norm_( # parameters=net_dur_disc.parameters(), max_norm=100 # ) grad_norm_dur = commons.clip_grad_value_( net_dur_disc.parameters(), None ) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) if getattr(hps.train, "bf16_run", False): torch.nn.utils.clip_grad_norm_(parameters=net_d.parameters(), max_norm=200) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): loss_slm = wl.discriminator( y.detach().squeeze(), y_hat.detach().squeeze() ).mean() optim_wd.zero_grad() scaler.scale(loss_slm).backward() scaler.unscale_(optim_wd) # torch.nn.utils.clip_grad_norm_(parameters=net_wd.parameters(), max_norm=200) grad_norm_wd = commons.clip_grad_value_(net_wd.parameters(), None) scaler.step(optim_wd) with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: _, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw_, logw, g) _, y_dur_hat_g_sdp = net_dur_disc(hidden_x, x_mask, logw_, logw_sdp, g) y_dur_hat_g = y_dur_hat_g + y_dur_hat_g_sdp with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) logger = logging.getLogger(__name__) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 global_step = 0 global_visemes_step = 0 def run_only_visemes(hps): # 使用最简单的单机模式,仅训练隐变量z到表情(visemes)的全连接 VisemesFCNet 的参数 global global_visemes_step torch.manual_seed(hps.train.seed) torch.cuda.set_device(0) train_dataset = AudioVisemesLoader(hps.data.training_visemes_files, hps.data) train_loader = DataLoader(train_dataset, num_workers=0, shuffle=False, pin_memory=True, batch_size=1, drop_last=True) eval_dataset = AudioVisemesLoader(hps.data.validation_visemes_files, hps.data) eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False) net_v = VisemesNet(hps.model.hidden_channels).cuda() latest_model_path = utils.latest_checkpoint_path(hps.model_dir, "V_*.pth") if latest_model_path is not None: _, optim_d, _, epoch_str = utils.load_checkpoint(latest_model_path, net_v, None, skip_optimizer=False) else : epoch_str = 1 global_visemes_step = 0 net_v.init_weights() optim_v = torch.optim.AdamW( net_v.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) optim_v.param_groups[0]['initial_lr'] = hps.train.learning_rate scheduler_v = torch.optim.lr_scheduler.ExponentialLR(optim_v, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2, ) scaler = GradScaler(enabled=hps.train.bf16_run) for epoch in range(epoch_str, hps.train.epochs + 1): train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler) scheduler_v.step() if epoch % hps.train.eval_interval == 0: eval_visemes_only(epoch, hps, net_v, eval_loader) utils.save_checkpoint(net_v, optim_v,hps.train.learning_rate , epoch, os.path.join(hps.model_dir, "V_{}.pth".format(epoch))) def train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler): for batch_idx, (spec, visemes) in tqdm(enumerate(train_loader)): spec, visemes = spec.cuda(), visemes.cuda() with autocast(enabled=hps.train.bf16_run): # 通过VisemesNet从z生成visemes_hat,和均方差 visemes_hat = net_v(spec) visemes_hat_mse = get_visemes_mse(visemes, visemes_hat) optim_v.zero_grad() scaler.scale(visemes_hat_mse).backward() scaler.unscale_(optim_v) grad_norm_v = commons.clip_grad_value_(net_v.parameters(), None) scaler.step(optim_v) global global_visemes_step global_visemes_step += 1 if batch_idx % hps.train.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tvisemes_hat_mse: {:.6f}\tgrad_norm_v: {:.6f}'.format( epoch, batch_idx * len(spec), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), visemes_hat_mse.item(), grad_norm_v)) def get_visemes_mse(visemes, visemes_hat): if visemes.shape[-1] != visemes_hat.shape[-1]: # 如果y和x的最低维度不一样 visemes_hat = F.interpolate(visemes_hat, size=visemes.shape[-1], mode='linear', align_corners=True) # 对x进行线性插值,使其形状与y一致 visemes_hat_mse = torch.mean(torch.pow(visemes_hat - visemes, 2)) return visemes_hat_mse def eval_visemes_only(epoch, hps, net_v, eval_loader): net_v.eval() with torch.no_grad(): visemes_hat_mse_sum = 0.0 for batch_idx, (spec, visemes) in tqdm(enumerate(eval_loader)): spec, visemes = spec.cuda(), visemes.cuda() # 通过VisemesFCNet从z生成visemes_hat,和均方差 visemes_hat = net_v(spec) visemes_hat_mse = get_visemes_mse(visemes, visemes_hat) visemes_hat_mse_sum += visemes_hat_mse # print('visemes_hat_mse', visemes_hat_mse) break visemes_hat_mse_avg = visemes_hat_mse_sum / (batch_idx + 1) log_str = '------------------ eval epoch: {} visemes_hat_mse_avg: {:.6f}'.format(epoch, visemes_hat_mse_avg) print(log_str) logger.warning(log_str) net_v.train() def run(): # 环境变量解析 envs = config.train_ms_config.env for env_name, env_value in envs.items(): if env_name not in os.environ.keys(): print("加载config中的配置{}".format(str(env_value))) os.environ[env_name] = str(env_value) print( "加载环境变量 \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format( os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"], os.environ["WORLD_SIZE"], os.environ["RANK"], os.environ["LOCAL_RANK"], ) ) backend = "nccl" if platform.system() == "Windows": backend = "gloo" # If Windows,switch to gloo backend. dist.init_process_group( backend=backend, init_method="env://", timeout=datetime.timedelta(seconds=300), ) # Use torchrun instead of mp.spawn rank = dist.get_rank() local_rank = int(os.environ["LOCAL_RANK"]) n_gpus = dist.get_world_size() # 命令行/config.yml配置解析 # hps = utils.get_hparams() parser = argparse.ArgumentParser() # 非必要不建议使用命令行配置,请使用config.yml文件 parser.add_argument( "-c", "--config", type=str, default=config.train_ms_config.config_path, help="JSON file for configuration", ) parser.add_argument( "-m", "--model", type=str, help="数据集文件夹路径,请注意,数据不再默认放在/logs文件夹下。如果需要用命令行配置,请声明相对于根目录的路径", default=config.dataset_path, ) parser.add_argument('--visemes', dest='visemes', action="store_true", default=False, help="train visemes only, lock the encoder and decoder") args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir set_logger(hps) if args.visemes: run_only_visemes(hps) # 比较路径是否相同 if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, 
num_workers=min(config.train_ms_config.num_workers, os.cpu_count() - 1), shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) else: net_dur_disc = None if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank) if getattr(hps.train, "freeze_ZH_bert", False): print("Freezing ZH bert encoder !!!") for param in net_g.enc_p.bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_EN_bert", False): print("Freezing EN bert encoder !!!") for param in net_g.enc_p.en_bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_JP_bert", False): print("Freezing JP bert encoder !!!") for param in net_g.enc_p.ja_bert_proj.parameters(): param.requires_grad = False net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank) net_wd = WavLMDiscriminator( hps.model.slm.hidden, hps.model.slm.nlayers, hps.model.slm.initial_channel ).cuda(local_rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_wd = torch.optim.AdamW( net_wd.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[local_rank], bucket_cap_mb=512) net_d = DDP(net_d, device_ids=[local_rank], bucket_cap_mb=512) net_wd = DDP(net_wd, device_ids=[local_rank], bucket_cap_mb=512) if net_dur_disc is not None: net_dur_disc = DDP( net_dur_disc, device_ids=[local_rank], bucket_cap_mb=512, ) # 下载底模 if config.train_ms_config.base["use_base_model"]: utils.download_checkpoint( hps.model_dir, config.train_ms_config.base, token=config.openi_token, mirror=config.mirror, ) dur_resume_lr = hps.train.learning_rate wd_resume_lr = hps.train.learning_rate if net_dur_disc is not 
None: try: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr except: print("Initialize dur_disc") try: _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr epoch_str = max(epoch_str, 1) # global_step = (epoch_str - 1) * len(train_loader) global_step = int( utils.get_steps(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth")) ) print( f"******************检测到模型存在,epoch为 {epoch_str},gloabl step为 {global_step}*********************" ) except Exception as e: print(e) epoch_str = 1 global_step = 0 try: _, optim_wd, wd_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "WD_*.pth"), net_wd, optim_wd, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_wd.param_groups[0].get("initial_lr"): optim_wd.param_groups[0]["initial_lr"] = wd_resume_lr except Exception as e: print(e) scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_wd = torch.optim.lr_scheduler.ExponentialLR( optim_wd, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.bf16_run) wl = WavLMLoss( hps.model.slm.model, net_wd, hps.data.sampling_rate, hps.model.slm.sr, ).to(local_rank) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc, net_wd, wl], [optim_g, optim_d, optim_dur_disc, optim_wd], [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc, net_wd, wl], [optim_g, optim_d, optim_dur_disc, optim_wd], [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() scheduler_wd.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc, net_wd, wl = nets optim_g, optim_d, optim_dur_disc, optim_wd = optims scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers 
train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() net_wd.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) in enumerate(tqdm(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_, logw_sdp), g, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1).float(), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( y_d_hat_r, y_d_hat_g ) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw_.detach(), logw.detach(), g.detach(), ) y_dur_hat_r_sdp, y_dur_hat_g_sdp = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw_.detach(), logw_sdp.detach(), g.detach(), ) y_dur_hat_r = y_dur_hat_r + y_dur_hat_r_sdp y_dur_hat_g = y_dur_hat_g + y_dur_hat_g_sdp with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): # TODO: I think need to mean using the mask, but for now, just mean all ( loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g, ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) # torch.nn.utils.clip_grad_norm_( # parameters=net_dur_disc.parameters(), max_norm=100 # ) grad_norm_dur = commons.clip_grad_value_( net_dur_disc.parameters(), None ) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) if getattr(hps.train, "bf16_run", False): torch.nn.utils.clip_grad_norm_(parameters=net_d.parameters(), max_norm=200) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): 
loss_slm = wl.discriminator( y.detach().squeeze(), y_hat.detach().squeeze() ).mean() optim_wd.zero_grad() scaler.scale(loss_slm).backward() scaler.unscale_(optim_wd) # torch.nn.utils.clip_grad_norm_(parameters=net_wd.parameters(), max_norm=200) grad_norm_wd = commons.clip_grad_value_(net_wd.parameters(), None) scaler.step(optim_wd) with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: _, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw_, logw, g) _, y_dur_hat_g_sdp = net_dur_disc(hidden_x, x_mask, logw_, logw_sdp, g) y_dur_hat_g = y_dur_hat_g + y_dur_hat_g_sdp with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
loss_fm = feature_loss(fmap_r, fmap_g)
12
2023-12-27 03:09:11+00:00
24k
chinhsuanwu/ifusion-threestudio
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n # 4D Gaussian Annealing\n anneal_density_blob_std_config: Optional[dict] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n 
enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )\n\n def update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ) -> None:\n if self.cfg.anneal_density_blob_std_config is not None:\n min_step = self.cfg.anneal_density_blob_std_config.min_anneal_step\n max_step = self.cfg.anneal_density_blob_std_config.max_anneal_step\n if global_step >= min_step and global_step <= max_step:\n end_val = self.cfg.anneal_density_blob_std_config.end_val\n start_val = self.cfg.anneal_density_blob_std_config.start_val\n self.density_blob_std = start_val + (global_step - min_step) * (\n end_val - start_val\n ) / (max_step - min_step)" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n 
self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, 
\"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def 
requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n 
vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n elif config.otype == \"HashGridSpatialTime\":\n encoding = TCNNEncodingSpatialTime(n_input_dims, config) # 4D-fy encoding\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), 
\"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
from pysdf import SDF
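Several of the snippets above call contract_to_unisphere before encoding, with the comment that points end up normalized to (0, 1). For the bounded case this amounts to rescaling the bounding box to the unit cube; a minimal sketch of that bounded branch only (the unbounded sphere contraction is omitted, and the bbox here is illustrative):

import torch

def contract_bounded(points: torch.Tensor, bbox: torch.Tensor) -> torch.Tensor:
    # bbox: (2, 3) tensor holding [min_corner, max_corner]; map points into [0, 1]^3
    return (points - bbox[0]) / (bbox[1] - bbox[0])

bbox = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
pts = torch.tensor([[0.0, 0.5, -1.0]])
print(contract_bounded(pts, bbox))  # tensor([[0.5000, 0.7500, 0.0000]])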
14,897
(self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf(
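The cropped code above builds its ground-truth SDF from a mesh by querying pysdf and negating the result, since pysdf reports positive distance inside the surface. A small standalone illustration of that sign convention (the icosphere is a stand-in mesh, not part of the record):

import numpy as np
import torch
import trimesh
from pysdf import SDF

mesh = trimesh.creation.icosphere(radius=0.5)   # illustrative sphere-like mesh
sdf = SDF(mesh.vertices, mesh.faces)            # pysdf: positive inside, negative outside

pts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]], dtype=np.float32)
signed = torch.from_numpy(-sdf(pts))[..., None] # negate so inside < 0, matching the code above
print(signed)                                   # roughly [[-0.5], [0.5]]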
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf(
scale_tensor(
10
2023-12-27 20:30:33+00:00
24k
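The record that ends here pairs retrieved cross-file snippets with a cropped in-file prefix, the expected next line, and what appears to be the index of the supporting snippet. A minimal sketch of how such a record could be turned into a completion prompt and scored; the top-level field names are assumptions for illustration, not text taken from the dump:

# Hypothetical consumer for one record of this dump. The context entries do carry
# "path" and "snippet" keys as shown above; the top-level names used here
# ("context", "cropped_code", "next_line") are assumptions.
def build_prompt(record: dict) -> str:
    # Prepend the retrieved cross-file snippets as commented context, then the cropped in-file prefix.
    ctx = "\n".join(f"# from {c['path']}\n{c['snippet']}" for c in record["context"])
    return ctx + "\n\n" + record["cropped_code"]

def first_line_matches(completion: str, record: dict) -> bool:
    # Exact match of the first generated line against the reference next line.
    lines = completion.splitlines()
    return bool(lines) and lines[0].strip() == record["next_line"].strip()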
open-mmlab/Amphion
modules/wenet_extractor/paraformer/paraformer.py
[ { "identifier": "MAELoss", "path": "modules/wenet_extractor/cif/predictor.py", "snippet": "class MAELoss(nn.Module):\n def __init__(self, normalize_length=False):\n super(MAELoss, self).__init__()\n self.normalize_length = normalize_length\n self.criterion = torch.nn.L1Loss(reduction=\"sum\")\n\n def forward(self, token_length, pre_token_length):\n loss_token_normalizer = token_length.size(0)\n if self.normalize_length:\n loss_token_normalizer = token_length.sum().type(torch.float32)\n loss = self.criterion(token_length, pre_token_length)\n loss = loss / loss_token_normalizer\n return loss" }, { "identifier": "Hypothesis", "path": "modules/wenet_extractor/paraformer/search/beam_search.py", "snippet": "class Hypothesis(NamedTuple):\n \"\"\"Hypothesis data type.\"\"\"\n\n yseq: torch.Tensor\n score: Union[float, torch.Tensor] = 0\n scores: Dict[str, Union[float, torch.Tensor]] = dict()\n states: Dict[str, Any] = dict()\n\n def asdict(self) -> dict:\n \"\"\"Convert data to JSON-friendly dict.\"\"\"\n return self._replace(\n yseq=self.yseq.tolist(),\n score=float(self.score),\n scores={k: float(v) for k, v in self.scores.items()},\n )._asdict()" }, { "identifier": "ASRModel", "path": "modules/wenet_extractor/transformer/asr_model.py", "snippet": "class ASRModel(torch.nn.Module):\n \"\"\"CTC-attention hybrid Encoder-Decoder model\"\"\"\n\n def __init__(\n self,\n vocab_size: int,\n encoder: TransformerEncoder,\n decoder: TransformerDecoder,\n ctc: CTC,\n ctc_weight: float = 0.5,\n ignore_id: int = IGNORE_ID,\n reverse_weight: float = 0.0,\n lsm_weight: float = 0.0,\n length_normalized_loss: bool = False,\n lfmmi_dir: str = \"\",\n ):\n assert 0.0 <= ctc_weight <= 1.0, ctc_weight\n\n super().__init__()\n # note that eos is the same as sos (equivalent ID)\n self.sos = vocab_size - 1\n self.eos = vocab_size - 1\n self.vocab_size = vocab_size\n self.ignore_id = ignore_id\n self.ctc_weight = ctc_weight\n self.reverse_weight = reverse_weight\n\n self.encoder = encoder\n self.decoder = decoder\n self.ctc = ctc\n self.criterion_att = LabelSmoothingLoss(\n size=vocab_size,\n padding_idx=ignore_id,\n smoothing=lsm_weight,\n normalize_length=length_normalized_loss,\n )\n self.lfmmi_dir = lfmmi_dir\n if self.lfmmi_dir != \"\":\n self.load_lfmmi_resource()\n\n def forward(\n self,\n speech: torch.Tensor,\n speech_lengths: torch.Tensor,\n text: torch.Tensor,\n text_lengths: torch.Tensor,\n ) -> Dict[str, Optional[torch.Tensor]]:\n \"\"\"Frontend + Encoder + Decoder + Calc loss\n\n Args:\n speech: (Batch, Length, ...)\n speech_lengths: (Batch, )\n text: (Batch, Length)\n text_lengths: (Batch,)\n \"\"\"\n\n assert text_lengths.dim() == 1, text_lengths.shape\n # Check that batch_size is unified\n assert (\n speech.shape[0]\n == speech_lengths.shape[0]\n == text.shape[0]\n == text_lengths.shape[0]\n ), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)\n # 1. Encoder\n encoder_out, encoder_mask = self.encoder(speech, speech_lengths)\n encoder_out_lens = encoder_mask.squeeze(1).sum(1)\n\n # 2a. Attention-decoder branch\n if self.ctc_weight != 1.0:\n loss_att, acc_att = self._calc_att_loss(\n encoder_out, encoder_mask, text, text_lengths\n )\n else:\n loss_att = None\n\n # 2b. 
CTC branch or LF-MMI loss\n if self.ctc_weight != 0.0:\n if self.lfmmi_dir != \"\":\n loss_ctc = self._calc_lfmmi_loss(encoder_out, encoder_mask, text)\n else:\n loss_ctc = self.ctc(encoder_out, encoder_out_lens, text, text_lengths)\n else:\n loss_ctc = None\n\n if loss_ctc is None:\n loss = loss_att\n elif loss_att is None:\n loss = loss_ctc\n else:\n loss = self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_att\n return {\"loss\": loss, \"loss_att\": loss_att, \"loss_ctc\": loss_ctc}\n\n def _calc_att_loss(\n self,\n encoder_out: torch.Tensor,\n encoder_mask: torch.Tensor,\n ys_pad: torch.Tensor,\n ys_pad_lens: torch.Tensor,\n ) -> Tuple[torch.Tensor, float]:\n ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)\n ys_in_lens = ys_pad_lens + 1\n\n # reverse the seq, used for right to left decoder\n r_ys_pad = reverse_pad_list(ys_pad, ys_pad_lens, float(self.ignore_id))\n r_ys_in_pad, r_ys_out_pad = add_sos_eos(\n r_ys_pad, self.sos, self.eos, self.ignore_id\n )\n # 1. Forward decoder\n decoder_out, r_decoder_out, _ = self.decoder(\n encoder_out,\n encoder_mask,\n ys_in_pad,\n ys_in_lens,\n r_ys_in_pad,\n self.reverse_weight,\n )\n # 2. Compute attention loss\n loss_att = self.criterion_att(decoder_out, ys_out_pad)\n r_loss_att = torch.tensor(0.0)\n if self.reverse_weight > 0.0:\n r_loss_att = self.criterion_att(r_decoder_out, r_ys_out_pad)\n loss_att = (\n loss_att * (1 - self.reverse_weight) + r_loss_att * self.reverse_weight\n )\n acc_att = th_accuracy(\n decoder_out.view(-1, self.vocab_size),\n ys_out_pad,\n ignore_label=self.ignore_id,\n )\n return loss_att, acc_att\n\n def _forward_encoder(\n self,\n speech: torch.Tensor,\n speech_lengths: torch.Tensor,\n decoding_chunk_size: int = -1,\n num_decoding_left_chunks: int = -1,\n simulate_streaming: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n # Let's assume B = batch_size\n # 1. 
Encoder\n if simulate_streaming and decoding_chunk_size > 0:\n encoder_out, encoder_mask = self.encoder.forward_chunk_by_chunk(\n speech,\n decoding_chunk_size=decoding_chunk_size,\n num_decoding_left_chunks=num_decoding_left_chunks,\n ) # (B, maxlen, encoder_dim)\n else:\n encoder_out, encoder_mask = self.encoder(\n speech,\n speech_lengths,\n decoding_chunk_size=decoding_chunk_size,\n num_decoding_left_chunks=num_decoding_left_chunks,\n ) # (B, maxlen, encoder_dim)\n return encoder_out, encoder_mask\n\n def encoder_extractor(\n self,\n speech: torch.Tensor,\n speech_lengths: torch.Tensor,\n decoding_chunk_size: int = -1,\n num_decoding_left_chunks: int = -1,\n simulate_streaming: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n # assert speech.shape[0] == speech_lengths[0]\n assert decoding_chunk_size != 0\n batch_size = speech.shape[0]\n\n encoder_out, encoder_mask = self._forward_encoder(\n speech,\n speech_lengths,\n decoding_chunk_size,\n num_decoding_left_chunks,\n simulate_streaming,\n ) # (B, maxlen, encoder_dim)\n\n return encoder_out\n\n def recognize(\n self,\n speech: torch.Tensor,\n speech_lengths: torch.Tensor,\n beam_size: int = 10,\n decoding_chunk_size: int = -1,\n num_decoding_left_chunks: int = -1,\n simulate_streaming: bool = False,\n ) -> torch.Tensor:\n \"\"\"Apply beam search on attention decoder\n\n Args:\n speech (torch.Tensor): (batch, max_len, feat_dim)\n speech_length (torch.Tensor): (batch, )\n beam_size (int): beam size for beam search\n decoding_chunk_size (int): decoding chunk for dynamic chunk\n trained model.\n <0: for decoding, use full chunk.\n >0: for decoding, use fixed chunk size as set.\n 0: used for training, it's prohibited here\n simulate_streaming (bool): whether do encoder forward in a\n streaming fashion\n\n Returns:\n torch.Tensor: decoding result, (batch, max_result_len)\n \"\"\"\n assert speech.shape[0] == speech_lengths.shape[0]\n assert decoding_chunk_size != 0\n device = speech.device\n batch_size = speech.shape[0]\n\n # Let's assume B = batch_size and N = beam_size\n # 1. Encoder\n encoder_out, encoder_mask = self._forward_encoder(\n speech,\n speech_lengths,\n decoding_chunk_size,\n num_decoding_left_chunks,\n simulate_streaming,\n ) # (B, maxlen, encoder_dim)\n maxlen = encoder_out.size(1)\n encoder_dim = encoder_out.size(2)\n running_size = batch_size * beam_size\n encoder_out = (\n encoder_out.unsqueeze(1)\n .repeat(1, beam_size, 1, 1)\n .view(running_size, maxlen, encoder_dim)\n ) # (B*N, maxlen, encoder_dim)\n encoder_mask = (\n encoder_mask.unsqueeze(1)\n .repeat(1, beam_size, 1, 1)\n .view(running_size, 1, maxlen)\n ) # (B*N, 1, max_len)\n\n hyps = torch.ones([running_size, 1], dtype=torch.long, device=device).fill_(\n self.sos\n ) # (B*N, 1)\n scores = torch.tensor(\n [0.0] + [-float(\"inf\")] * (beam_size - 1), dtype=torch.float\n )\n scores = (\n scores.to(device).repeat([batch_size]).unsqueeze(1).to(device)\n ) # (B*N, 1)\n end_flag = torch.zeros_like(scores, dtype=torch.bool, device=device)\n cache: Optional[List[torch.Tensor]] = None\n # 2. 
Decoder forward step by step\n for i in range(1, maxlen + 1):\n # Stop if all batch and all beam produce eos\n if end_flag.sum() == running_size:\n break\n # 2.1 Forward decoder step\n hyps_mask = (\n subsequent_mask(i).unsqueeze(0).repeat(running_size, 1, 1).to(device)\n ) # (B*N, i, i)\n # logp: (B*N, vocab)\n logp, cache = self.decoder.forward_one_step(\n encoder_out, encoder_mask, hyps, hyps_mask, cache\n )\n # 2.2 First beam prune: select topk best prob at current time\n top_k_logp, top_k_index = logp.topk(beam_size) # (B*N, N)\n top_k_logp = mask_finished_scores(top_k_logp, end_flag)\n top_k_index = mask_finished_preds(top_k_index, end_flag, self.eos)\n # 2.3 Second beam prune: select topk score with history\n scores = scores + top_k_logp # (B*N, N), broadcast add\n scores = scores.view(batch_size, beam_size * beam_size) # (B, N*N)\n scores, offset_k_index = scores.topk(k=beam_size) # (B, N)\n # Update cache to be consistent with new topk scores / hyps\n cache_index = (offset_k_index // beam_size).view(-1) # (B*N)\n base_cache_index = (\n torch.arange(batch_size, device=device)\n .view(-1, 1)\n .repeat([1, beam_size])\n * beam_size\n ).view(\n -1\n ) # (B*N)\n cache_index = base_cache_index + cache_index\n cache = [torch.index_select(c, dim=0, index=cache_index) for c in cache]\n scores = scores.view(-1, 1) # (B*N, 1)\n # 2.4. Compute base index in top_k_index,\n # regard top_k_index as (B*N*N),regard offset_k_index as (B*N),\n # then find offset_k_index in top_k_index\n base_k_index = (\n torch.arange(batch_size, device=device)\n .view(-1, 1)\n .repeat([1, beam_size])\n ) # (B, N)\n base_k_index = base_k_index * beam_size * beam_size\n best_k_index = base_k_index.view(-1) + offset_k_index.view(-1) # (B*N)\n\n # 2.5 Update best hyps\n best_k_pred = torch.index_select(\n top_k_index.view(-1), dim=-1, index=best_k_index\n ) # (B*N)\n best_hyps_index = best_k_index // beam_size\n last_best_k_hyps = torch.index_select(\n hyps, dim=0, index=best_hyps_index\n ) # (B*N, i)\n hyps = torch.cat(\n (last_best_k_hyps, best_k_pred.view(-1, 1)), dim=1\n ) # (B*N, i+1)\n\n # 2.6 Update end flag\n end_flag = torch.eq(hyps[:, -1], self.eos).view(-1, 1)\n\n # 3. 
Select best of best\n scores = scores.view(batch_size, beam_size)\n # TODO: length normalization\n best_scores, best_index = scores.max(dim=-1)\n best_hyps_index = (\n best_index\n + torch.arange(batch_size, dtype=torch.long, device=device) * beam_size\n )\n best_hyps = torch.index_select(hyps, dim=0, index=best_hyps_index)\n best_hyps = best_hyps[:, 1:]\n return best_hyps, best_scores\n\n def ctc_greedy_search(\n self,\n speech: torch.Tensor,\n speech_lengths: torch.Tensor,\n decoding_chunk_size: int = -1,\n num_decoding_left_chunks: int = -1,\n simulate_streaming: bool = False,\n ) -> List[List[int]]:\n \"\"\"Apply CTC greedy search\n\n Args:\n speech (torch.Tensor): (batch, max_len, feat_dim)\n speech_length (torch.Tensor): (batch, )\n beam_size (int): beam size for beam search\n decoding_chunk_size (int): decoding chunk for dynamic chunk\n trained model.\n <0: for decoding, use full chunk.\n >0: for decoding, use fixed chunk size as set.\n 0: used for training, it's prohibited here\n simulate_streaming (bool): whether do encoder forward in a\n streaming fashion\n Returns:\n List[List[int]]: best path result\n \"\"\"\n assert speech.shape[0] == speech_lengths.shape[0]\n assert decoding_chunk_size != 0\n batch_size = speech.shape[0]\n # Let's assume B = batch_size\n encoder_out, encoder_mask = self._forward_encoder(\n speech,\n speech_lengths,\n decoding_chunk_size,\n num_decoding_left_chunks,\n simulate_streaming,\n ) # (B, maxlen, encoder_dim)\n maxlen = encoder_out.size(1)\n encoder_out_lens = encoder_mask.squeeze(1).sum(1)\n ctc_probs = self.ctc.log_softmax(encoder_out) # (B, maxlen, vocab_size)\n topk_prob, topk_index = ctc_probs.topk(1, dim=2) # (B, maxlen, 1)\n topk_index = topk_index.view(batch_size, maxlen) # (B, maxlen)\n mask = make_pad_mask(encoder_out_lens, maxlen) # (B, maxlen)\n topk_index = topk_index.masked_fill_(mask, self.eos) # (B, maxlen)\n hyps = [hyp.tolist() for hyp in topk_index]\n scores = topk_prob.max(1)\n hyps = [remove_duplicates_and_blank(hyp) for hyp in hyps]\n return hyps, scores\n\n def _ctc_prefix_beam_search(\n self,\n speech: torch.Tensor,\n speech_lengths: torch.Tensor,\n beam_size: int,\n decoding_chunk_size: int = -1,\n num_decoding_left_chunks: int = -1,\n simulate_streaming: bool = False,\n ) -> Tuple[List[List[int]], torch.Tensor]:\n \"\"\"CTC prefix beam search inner implementation\n\n Args:\n speech (torch.Tensor): (batch, max_len, feat_dim)\n speech_length (torch.Tensor): (batch, )\n beam_size (int): beam size for beam search\n decoding_chunk_size (int): decoding chunk for dynamic chunk\n trained model.\n <0: for decoding, use full chunk.\n >0: for decoding, use fixed chunk size as set.\n 0: used for training, it's prohibited here\n simulate_streaming (bool): whether do encoder forward in a\n streaming fashion\n\n Returns:\n List[List[int]]: nbest results\n torch.Tensor: encoder output, (1, max_len, encoder_dim),\n it will be used for rescoring in attention rescoring mode\n \"\"\"\n assert speech.shape[0] == speech_lengths.shape[0]\n assert decoding_chunk_size != 0\n batch_size = speech.shape[0]\n # For CTC prefix beam search, we only support batch_size=1\n assert batch_size == 1\n # Let's assume B = batch_size and N = beam_size\n # 1. 
Encoder forward and get CTC score\n encoder_out, encoder_mask = self._forward_encoder(\n speech,\n speech_lengths,\n decoding_chunk_size,\n num_decoding_left_chunks,\n simulate_streaming,\n ) # (B, maxlen, encoder_dim)\n maxlen = encoder_out.size(1)\n ctc_probs = self.ctc.log_softmax(encoder_out) # (1, maxlen, vocab_size)\n ctc_probs = ctc_probs.squeeze(0)\n # cur_hyps: (prefix, (blank_ending_score, none_blank_ending_score))\n cur_hyps = [(tuple(), (0.0, -float(\"inf\")))]\n # 2. CTC beam search step by step\n for t in range(0, maxlen):\n logp = ctc_probs[t] # (vocab_size,)\n # key: prefix, value (pb, pnb), default value(-inf, -inf)\n next_hyps = defaultdict(lambda: (-float(\"inf\"), -float(\"inf\")))\n # 2.1 First beam prune: select topk best\n top_k_logp, top_k_index = logp.topk(beam_size) # (beam_size,)\n for s in top_k_index:\n s = s.item()\n ps = logp[s].item()\n for prefix, (pb, pnb) in cur_hyps:\n last = prefix[-1] if len(prefix) > 0 else None\n if s == 0: # blank\n n_pb, n_pnb = next_hyps[prefix]\n n_pb = log_add([n_pb, pb + ps, pnb + ps])\n next_hyps[prefix] = (n_pb, n_pnb)\n elif s == last:\n # Update *ss -> *s;\n n_pb, n_pnb = next_hyps[prefix]\n n_pnb = log_add([n_pnb, pnb + ps])\n next_hyps[prefix] = (n_pb, n_pnb)\n # Update *s-s -> *ss, - is for blank\n n_prefix = prefix + (s,)\n n_pb, n_pnb = next_hyps[n_prefix]\n n_pnb = log_add([n_pnb, pb + ps])\n next_hyps[n_prefix] = (n_pb, n_pnb)\n else:\n n_prefix = prefix + (s,)\n n_pb, n_pnb = next_hyps[n_prefix]\n n_pnb = log_add([n_pnb, pb + ps, pnb + ps])\n next_hyps[n_prefix] = (n_pb, n_pnb)\n\n # 2.2 Second beam prune\n next_hyps = sorted(\n next_hyps.items(), key=lambda x: log_add(list(x[1])), reverse=True\n )\n cur_hyps = next_hyps[:beam_size]\n hyps = [(y[0], log_add([y[1][0], y[1][1]])) for y in cur_hyps]\n return hyps, encoder_out\n\n def ctc_prefix_beam_search(\n self,\n speech: torch.Tensor,\n speech_lengths: torch.Tensor,\n beam_size: int,\n decoding_chunk_size: int = -1,\n num_decoding_left_chunks: int = -1,\n simulate_streaming: bool = False,\n ) -> List[int]:\n \"\"\"Apply CTC prefix beam search\n\n Args:\n speech (torch.Tensor): (batch, max_len, feat_dim)\n speech_length (torch.Tensor): (batch, )\n beam_size (int): beam size for beam search\n decoding_chunk_size (int): decoding chunk for dynamic chunk\n trained model.\n <0: for decoding, use full chunk.\n >0: for decoding, use fixed chunk size as set.\n 0: used for training, it's prohibited here\n simulate_streaming (bool): whether do encoder forward in a\n streaming fashion\n\n Returns:\n List[int]: CTC prefix beam search nbest results\n \"\"\"\n hyps, _ = self._ctc_prefix_beam_search(\n speech,\n speech_lengths,\n beam_size,\n decoding_chunk_size,\n num_decoding_left_chunks,\n simulate_streaming,\n )\n return hyps[0]\n\n def attention_rescoring(\n self,\n speech: torch.Tensor,\n speech_lengths: torch.Tensor,\n beam_size: int,\n decoding_chunk_size: int = -1,\n num_decoding_left_chunks: int = -1,\n ctc_weight: float = 0.0,\n simulate_streaming: bool = False,\n reverse_weight: float = 0.0,\n ) -> List[int]:\n \"\"\"Apply attention rescoring decoding, CTC prefix beam search\n is applied first to get nbest, then we resoring the nbest on\n attention decoder with corresponding encoder out\n\n Args:\n speech (torch.Tensor): (batch, max_len, feat_dim)\n speech_length (torch.Tensor): (batch, )\n beam_size (int): beam size for beam search\n decoding_chunk_size (int): decoding chunk for dynamic chunk\n trained model.\n <0: for decoding, use full chunk.\n >0: for decoding, use 
fixed chunk size as set.\n 0: used for training, it's prohibited here\n simulate_streaming (bool): whether do encoder forward in a\n streaming fashion\n reverse_weight (float): right to left decoder weight\n ctc_weight (float): ctc score weight\n\n Returns:\n List[int]: Attention rescoring result\n \"\"\"\n assert speech.shape[0] == speech_lengths.shape[0]\n assert decoding_chunk_size != 0\n if reverse_weight > 0.0:\n # decoder should be a bitransformer decoder if reverse_weight > 0.0\n assert hasattr(self.decoder, \"right_decoder\")\n device = speech.device\n batch_size = speech.shape[0]\n # For attention rescoring we only support batch_size=1\n assert batch_size == 1\n # encoder_out: (1, maxlen, encoder_dim), len(hyps) = beam_size\n hyps, encoder_out = self._ctc_prefix_beam_search(\n speech,\n speech_lengths,\n beam_size,\n decoding_chunk_size,\n num_decoding_left_chunks,\n simulate_streaming,\n )\n\n assert len(hyps) == beam_size\n hyps_pad = pad_sequence(\n [torch.tensor(hyp[0], device=device, dtype=torch.long) for hyp in hyps],\n True,\n self.ignore_id,\n ) # (beam_size, max_hyps_len)\n ori_hyps_pad = hyps_pad\n hyps_lens = torch.tensor(\n [len(hyp[0]) for hyp in hyps], device=device, dtype=torch.long\n ) # (beam_size,)\n hyps_pad, _ = add_sos_eos(hyps_pad, self.sos, self.eos, self.ignore_id)\n hyps_lens = hyps_lens + 1 # Add <sos> at begining\n encoder_out = encoder_out.repeat(beam_size, 1, 1)\n encoder_mask = torch.ones(\n beam_size, 1, encoder_out.size(1), dtype=torch.bool, device=device\n )\n # used for right to left decoder\n r_hyps_pad = reverse_pad_list(ori_hyps_pad, hyps_lens, self.ignore_id)\n r_hyps_pad, _ = add_sos_eos(r_hyps_pad, self.sos, self.eos, self.ignore_id)\n decoder_out, r_decoder_out, _ = self.decoder(\n encoder_out, encoder_mask, hyps_pad, hyps_lens, r_hyps_pad, reverse_weight\n ) # (beam_size, max_hyps_len, vocab_size)\n decoder_out = torch.nn.functional.log_softmax(decoder_out, dim=-1)\n decoder_out = decoder_out.cpu().numpy()\n # r_decoder_out will be 0.0, if reverse_weight is 0.0 or decoder is a\n # conventional transformer decoder.\n r_decoder_out = torch.nn.functional.log_softmax(r_decoder_out, dim=-1)\n r_decoder_out = r_decoder_out.cpu().numpy()\n # Only use decoder score for rescoring\n best_score = -float(\"inf\")\n best_index = 0\n for i, hyp in enumerate(hyps):\n score = 0.0\n for j, w in enumerate(hyp[0]):\n score += decoder_out[i][j][w]\n score += decoder_out[i][len(hyp[0])][self.eos]\n # add right to left decoder score\n if reverse_weight > 0:\n r_score = 0.0\n for j, w in enumerate(hyp[0]):\n r_score += r_decoder_out[i][len(hyp[0]) - j - 1][w]\n r_score += r_decoder_out[i][len(hyp[0])][self.eos]\n score = score * (1 - reverse_weight) + r_score * reverse_weight\n # add ctc score\n score += hyp[1] * ctc_weight\n if score > best_score:\n best_score = score\n best_index = i\n return hyps[best_index][0], best_score\n\n @torch.jit.unused\n def load_lfmmi_resource(self):\n with open(\"{}/tokens.txt\".format(self.lfmmi_dir), \"r\") as fin:\n for line in fin:\n arr = line.strip().split()\n if arr[0] == \"<sos/eos>\":\n self.sos_eos_id = int(arr[1])\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.graph_compiler = MmiTrainingGraphCompiler(\n self.lfmmi_dir,\n device=device,\n oov=\"<UNK>\",\n sos_id=self.sos_eos_id,\n eos_id=self.sos_eos_id,\n )\n self.lfmmi = LFMMILoss(\n graph_compiler=self.graph_compiler,\n den_scale=1,\n use_pruned_intersect=False,\n )\n self.word_table = {}\n with 
open(\"{}/words.txt\".format(self.lfmmi_dir), \"r\") as fin:\n for line in fin:\n arr = line.strip().split()\n assert len(arr) == 2\n self.word_table[int(arr[1])] = arr[0]\n\n @torch.jit.unused\n def _calc_lfmmi_loss(self, encoder_out, encoder_mask, text):\n ctc_probs = self.ctc.log_softmax(encoder_out)\n supervision_segments = torch.stack(\n (\n torch.arange(len(encoder_mask)),\n torch.zeros(len(encoder_mask)),\n encoder_mask.squeeze(dim=1).sum(dim=1).to(\"cpu\"),\n ),\n 1,\n ).to(torch.int32)\n dense_fsa_vec = k2.DenseFsaVec(\n ctc_probs,\n supervision_segments,\n allow_truncate=3,\n )\n text = [\n \" \".join([self.word_table[j.item()] for j in i if j != -1]) for i in text\n ]\n loss = self.lfmmi(dense_fsa_vec=dense_fsa_vec, texts=text) / len(text)\n return loss\n\n def load_hlg_resource_if_necessary(self, hlg, word):\n if not hasattr(self, \"hlg\"):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.hlg = k2.Fsa.from_dict(torch.load(hlg, map_location=device))\n if not hasattr(self.hlg, \"lm_scores\"):\n self.hlg.lm_scores = self.hlg.scores.clone()\n if not hasattr(self, \"word_table\"):\n self.word_table = {}\n with open(word, \"r\") as fin:\n for line in fin:\n arr = line.strip().split()\n assert len(arr) == 2\n self.word_table[int(arr[1])] = arr[0]\n\n @torch.no_grad()\n def hlg_onebest(\n self,\n speech: torch.Tensor,\n speech_lengths: torch.Tensor,\n decoding_chunk_size: int = -1,\n num_decoding_left_chunks: int = -1,\n simulate_streaming: bool = False,\n hlg: str = \"\",\n word: str = \"\",\n symbol_table: Dict[str, int] = None,\n ) -> List[int]:\n self.load_hlg_resource_if_necessary(hlg, word)\n encoder_out, encoder_mask = self._forward_encoder(\n speech,\n speech_lengths,\n decoding_chunk_size,\n num_decoding_left_chunks,\n simulate_streaming,\n ) # (B, maxlen, encoder_dim)\n ctc_probs = self.ctc.log_softmax(encoder_out) # (1, maxlen, vocab_size)\n supervision_segments = torch.stack(\n (\n torch.arange(len(encoder_mask)),\n torch.zeros(len(encoder_mask)),\n encoder_mask.squeeze(dim=1).sum(dim=1).cpu(),\n ),\n 1,\n ).to(torch.int32)\n lattice = get_lattice(\n nnet_output=ctc_probs,\n decoding_graph=self.hlg,\n supervision_segments=supervision_segments,\n search_beam=20,\n output_beam=7,\n min_active_states=30,\n max_active_states=10000,\n subsampling_factor=4,\n )\n best_path = one_best_decoding(lattice=lattice, use_double_scores=True)\n hyps = get_texts(best_path)\n hyps = [[symbol_table[k] for j in i for k in self.word_table[j]] for i in hyps]\n return hyps\n\n @torch.no_grad()\n def hlg_rescore(\n self,\n speech: torch.Tensor,\n speech_lengths: torch.Tensor,\n decoding_chunk_size: int = -1,\n num_decoding_left_chunks: int = -1,\n simulate_streaming: bool = False,\n lm_scale: float = 0,\n decoder_scale: float = 0,\n r_decoder_scale: float = 0,\n hlg: str = \"\",\n word: str = \"\",\n symbol_table: Dict[str, int] = None,\n ) -> List[int]:\n self.load_hlg_resource_if_necessary(hlg, word)\n device = speech.device\n encoder_out, encoder_mask = self._forward_encoder(\n speech,\n speech_lengths,\n decoding_chunk_size,\n num_decoding_left_chunks,\n simulate_streaming,\n ) # (B, maxlen, encoder_dim)\n ctc_probs = self.ctc.log_softmax(encoder_out) # (1, maxlen, vocab_size)\n supervision_segments = torch.stack(\n (\n torch.arange(len(encoder_mask)),\n torch.zeros(len(encoder_mask)),\n encoder_mask.squeeze(dim=1).sum(dim=1).cpu(),\n ),\n 1,\n ).to(torch.int32)\n lattice = get_lattice(\n nnet_output=ctc_probs,\n decoding_graph=self.hlg,\n 
supervision_segments=supervision_segments,\n search_beam=20,\n output_beam=7,\n min_active_states=30,\n max_active_states=10000,\n subsampling_factor=4,\n )\n nbest = Nbest.from_lattice(\n lattice=lattice,\n num_paths=100,\n use_double_scores=True,\n nbest_scale=0.5,\n )\n nbest = nbest.intersect(lattice)\n assert hasattr(nbest.fsa, \"lm_scores\")\n assert hasattr(nbest.fsa, \"tokens\")\n assert isinstance(nbest.fsa.tokens, torch.Tensor)\n\n tokens_shape = nbest.fsa.arcs.shape().remove_axis(1)\n tokens = k2.RaggedTensor(tokens_shape, nbest.fsa.tokens)\n tokens = tokens.remove_values_leq(0)\n hyps = tokens.tolist()\n\n # cal attention_score\n hyps_pad = pad_sequence(\n [torch.tensor(hyp, device=device, dtype=torch.long) for hyp in hyps],\n True,\n self.ignore_id,\n ) # (beam_size, max_hyps_len)\n ori_hyps_pad = hyps_pad\n hyps_lens = torch.tensor(\n [len(hyp) for hyp in hyps], device=device, dtype=torch.long\n ) # (beam_size,)\n hyps_pad, _ = add_sos_eos(hyps_pad, self.sos, self.eos, self.ignore_id)\n hyps_lens = hyps_lens + 1 # Add <sos> at begining\n encoder_out_repeat = []\n tot_scores = nbest.tot_scores()\n repeats = [tot_scores[i].shape[0] for i in range(tot_scores.dim0)]\n for i in range(len(encoder_out)):\n encoder_out_repeat.append(encoder_out[i : i + 1].repeat(repeats[i], 1, 1))\n encoder_out = torch.concat(encoder_out_repeat, dim=0)\n encoder_mask = torch.ones(\n encoder_out.size(0), 1, encoder_out.size(1), dtype=torch.bool, device=device\n )\n # used for right to left decoder\n r_hyps_pad = reverse_pad_list(ori_hyps_pad, hyps_lens, self.ignore_id)\n r_hyps_pad, _ = add_sos_eos(r_hyps_pad, self.sos, self.eos, self.ignore_id)\n reverse_weight = 0.5\n decoder_out, r_decoder_out, _ = self.decoder(\n encoder_out, encoder_mask, hyps_pad, hyps_lens, r_hyps_pad, reverse_weight\n ) # (beam_size, max_hyps_len, vocab_size)\n decoder_out = torch.nn.functional.log_softmax(decoder_out, dim=-1)\n decoder_out = decoder_out\n # r_decoder_out will be 0.0, if reverse_weight is 0.0 or decoder is a\n # conventional transformer decoder.\n r_decoder_out = torch.nn.functional.log_softmax(r_decoder_out, dim=-1)\n r_decoder_out = r_decoder_out\n\n decoder_scores = torch.tensor(\n [\n sum([decoder_out[i, j, hyps[i][j]] for j in range(len(hyps[i]))])\n for i in range(len(hyps))\n ],\n device=device,\n )\n r_decoder_scores = []\n for i in range(len(hyps)):\n score = 0\n for j in range(len(hyps[i])):\n score += r_decoder_out[i, len(hyps[i]) - j - 1, hyps[i][j]]\n score += r_decoder_out[i, len(hyps[i]), self.eos]\n r_decoder_scores.append(score)\n r_decoder_scores = torch.tensor(r_decoder_scores, device=device)\n\n am_scores = nbest.compute_am_scores()\n ngram_lm_scores = nbest.compute_lm_scores()\n tot_scores = (\n am_scores.values\n + lm_scale * ngram_lm_scores.values\n + decoder_scale * decoder_scores\n + r_decoder_scale * r_decoder_scores\n )\n ragged_tot_scores = k2.RaggedTensor(nbest.shape, tot_scores)\n max_indexes = ragged_tot_scores.argmax()\n best_path = k2.index_fsa(nbest.fsa, max_indexes)\n hyps = get_texts(best_path)\n hyps = [[symbol_table[k] for j in i for k in self.word_table[j]] for i in hyps]\n return hyps\n\n @torch.jit.export\n def subsampling_rate(self) -> int:\n \"\"\"Export interface for c++ call, return subsampling_rate of the\n model\n \"\"\"\n return self.encoder.embed.subsampling_rate\n\n @torch.jit.export\n def right_context(self) -> int:\n \"\"\"Export interface for c++ call, return right_context of the model\"\"\"\n return self.encoder.embed.right_context\n\n @torch.jit.export\n 
def sos_symbol(self) -> int:\n \"\"\"Export interface for c++ call, return sos symbol id of the model\"\"\"\n return self.sos\n\n @torch.jit.export\n def eos_symbol(self) -> int:\n \"\"\"Export interface for c++ call, return eos symbol id of the model\"\"\"\n return self.eos\n\n @torch.jit.export\n def forward_encoder_chunk(\n self,\n xs: torch.Tensor,\n offset: int,\n required_cache_size: int,\n att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),\n cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\" Export interface for c++ call, give input chunk xs, and return\n output from time 0 to current chunk.\n\n Args:\n xs (torch.Tensor): chunk input, with shape (b=1, time, mel-dim),\n where `time == (chunk_size - 1) * subsample_rate + \\\n subsample.right_context + 1`\n offset (int): current offset in encoder output time stamp\n required_cache_size (int): cache size required for next chunk\n compuation\n >=0: actual cache size\n <0: means all history cache is required\n att_cache (torch.Tensor): cache tensor for KEY & VALUE in\n transformer/conformer attention, with shape\n (elayers, head, cache_t1, d_k * 2), where\n `head * d_k == hidden-dim` and\n `cache_t1 == chunk_size * num_decoding_left_chunks`.\n cnn_cache (torch.Tensor): cache tensor for cnn_module in conformer,\n (elayers, b=1, hidden-dim, cache_t2), where\n `cache_t2 == cnn.lorder - 1`\n\n Returns:\n torch.Tensor: output of current input xs,\n with shape (b=1, chunk_size, hidden-dim).\n torch.Tensor: new attention cache required for next chunk, with\n dynamic shape (elayers, head, ?, d_k * 2)\n depending on required_cache_size.\n torch.Tensor: new conformer cnn cache required for next chunk, with\n same shape as the original cnn_cache.\n\n \"\"\"\n return self.encoder.forward_chunk(\n xs, offset, required_cache_size, att_cache, cnn_cache\n )\n\n @torch.jit.export\n def ctc_activation(self, xs: torch.Tensor) -> torch.Tensor:\n \"\"\"Export interface for c++ call, apply linear transform and log\n softmax before ctc\n Args:\n xs (torch.Tensor): encoder output\n\n Returns:\n torch.Tensor: activation before ctc\n\n \"\"\"\n return self.ctc.log_softmax(xs)\n\n @torch.jit.export\n def is_bidirectional_decoder(self) -> bool:\n \"\"\"\n Returns:\n torch.Tensor: decoder output\n \"\"\"\n if hasattr(self.decoder, \"right_decoder\"):\n return True\n else:\n return False\n\n @torch.jit.export\n def forward_attention_decoder(\n self,\n hyps: torch.Tensor,\n hyps_lens: torch.Tensor,\n encoder_out: torch.Tensor,\n reverse_weight: float = 0,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Export interface for c++ call, forward decoder with multiple\n hypothesis from ctc prefix beam search and one encoder output\n Args:\n hyps (torch.Tensor): hyps from ctc prefix beam search, already\n pad sos at the begining\n hyps_lens (torch.Tensor): length of each hyp in hyps\n encoder_out (torch.Tensor): corresponding encoder output\n r_hyps (torch.Tensor): hyps from ctc prefix beam search, already\n pad eos at the begining which is used fo right to left decoder\n reverse_weight: used for verfing whether used right to left decoder,\n > 0 will use.\n\n Returns:\n torch.Tensor: decoder output\n \"\"\"\n assert encoder_out.size(0) == 1\n num_hyps = hyps.size(0)\n assert hyps_lens.size(0) == num_hyps\n encoder_out = encoder_out.repeat(num_hyps, 1, 1)\n encoder_mask = torch.ones(\n num_hyps,\n 1,\n encoder_out.size(1),\n dtype=torch.bool,\n device=encoder_out.device,\n )\n\n # input for right to left 
decoder\n # this hyps_lens has count <sos> token, we need minus it.\n r_hyps_lens = hyps_lens - 1\n # this hyps has included <sos> token, so it should be\n # convert the original hyps.\n r_hyps = hyps[:, 1:]\n # >>> r_hyps\n # >>> tensor([[ 1, 2, 3],\n # >>> [ 9, 8, 4],\n # >>> [ 2, -1, -1]])\n # >>> r_hyps_lens\n # >>> tensor([3, 3, 1])\n\n # NOTE(Mddct): `pad_sequence` is not supported by ONNX, it is used\n # in `reverse_pad_list` thus we have to refine the below code.\n # Issue: https://github.com/wenet-e2e/wenet/issues/1113\n # Equal to:\n # >>> r_hyps = reverse_pad_list(r_hyps, r_hyps_lens, float(self.ignore_id))\n # >>> r_hyps, _ = add_sos_eos(r_hyps, self.sos, self.eos, self.ignore_id)\n max_len = torch.max(r_hyps_lens)\n index_range = torch.arange(0, max_len, 1).to(encoder_out.device)\n seq_len_expand = r_hyps_lens.unsqueeze(1)\n seq_mask = seq_len_expand > index_range # (beam, max_len)\n # >>> seq_mask\n # >>> tensor([[ True, True, True],\n # >>> [ True, True, True],\n # >>> [ True, False, False]])\n index = (seq_len_expand - 1) - index_range # (beam, max_len)\n # >>> index\n # >>> tensor([[ 2, 1, 0],\n # >>> [ 2, 1, 0],\n # >>> [ 0, -1, -2]])\n index = index * seq_mask\n # >>> index\n # >>> tensor([[2, 1, 0],\n # >>> [2, 1, 0],\n # >>> [0, 0, 0]])\n r_hyps = torch.gather(r_hyps, 1, index)\n # >>> r_hyps\n # >>> tensor([[3, 2, 1],\n # >>> [4, 8, 9],\n # >>> [2, 2, 2]])\n r_hyps = torch.where(seq_mask, r_hyps, self.eos)\n # >>> r_hyps\n # >>> tensor([[3, 2, 1],\n # >>> [4, 8, 9],\n # >>> [2, eos, eos]])\n r_hyps = torch.cat([hyps[:, 0:1], r_hyps], dim=1)\n # >>> r_hyps\n # >>> tensor([[sos, 3, 2, 1],\n # >>> [sos, 4, 8, 9],\n # >>> [sos, 2, eos, eos]])\n\n decoder_out, r_decoder_out, _ = self.decoder(\n encoder_out, encoder_mask, hyps, hyps_lens, r_hyps, reverse_weight\n ) # (num_hyps, max_hyps_len, vocab_size)\n decoder_out = torch.nn.functional.log_softmax(decoder_out, dim=-1)\n\n # right to left decoder may be not used during decoding process,\n # which depends on reverse_weight param.\n # r_dccoder_out will be 0.0, if reverse_weight is 0.0\n r_decoder_out = torch.nn.functional.log_softmax(r_decoder_out, dim=-1)\n return decoder_out, r_decoder_out" }, { "identifier": "CTC", "path": "modules/wenet_extractor/transformer/ctc.py", "snippet": "class CTC(torch.nn.Module):\n \"\"\"CTC module\"\"\"\n\n def __init__(\n self,\n odim: int,\n encoder_output_size: int,\n dropout_rate: float = 0.0,\n reduce: bool = True,\n ):\n \"\"\"Construct CTC module\n Args:\n odim: dimension of outputs\n encoder_output_size: number of encoder projection units\n dropout_rate: dropout rate (0.0 ~ 1.0)\n reduce: reduce the CTC loss into a scalar\n \"\"\"\n super().__init__()\n eprojs = encoder_output_size\n self.dropout_rate = dropout_rate\n self.ctc_lo = torch.nn.Linear(eprojs, odim)\n\n reduction_type = \"sum\" if reduce else \"none\"\n self.ctc_loss = torch.nn.CTCLoss(reduction=reduction_type)\n\n def forward(\n self,\n hs_pad: torch.Tensor,\n hlens: torch.Tensor,\n ys_pad: torch.Tensor,\n ys_lens: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"Calculate CTC loss.\n\n Args:\n hs_pad: batch of padded hidden state sequences (B, Tmax, D)\n hlens: batch of lengths of hidden state sequences (B)\n ys_pad: batch of padded character id sequence tensor (B, Lmax)\n ys_lens: batch of lengths of character sequence (B)\n \"\"\"\n # hs_pad: (B, L, NProj) -> ys_hat: (B, L, Nvocab)\n ys_hat = self.ctc_lo(F.dropout(hs_pad, p=self.dropout_rate))\n # ys_hat: (B, L, D) -> (L, B, D)\n ys_hat = ys_hat.transpose(0, 1)\n ys_hat = 
ys_hat.log_softmax(2)\n loss = self.ctc_loss(ys_hat, ys_pad, hlens, ys_lens)\n # Batch-size average\n loss = loss / ys_hat.size(1)\n return loss\n\n def log_softmax(self, hs_pad: torch.Tensor) -> torch.Tensor:\n \"\"\"log_softmax of frame activations\n\n Args:\n Tensor hs_pad: 3d tensor (B, Tmax, eprojs)\n Returns:\n torch.Tensor: log softmax applied 3d tensor (B, Tmax, odim)\n \"\"\"\n return F.log_softmax(self.ctc_lo(hs_pad), dim=2)\n\n def argmax(self, hs_pad: torch.Tensor) -> torch.Tensor:\n \"\"\"argmax of frame activations\n\n Args:\n torch.Tensor hs_pad: 3d tensor (B, Tmax, eprojs)\n Returns:\n torch.Tensor: argmax applied 2d tensor (B, Tmax)\n \"\"\"\n return torch.argmax(self.ctc_lo(hs_pad), dim=2)" }, { "identifier": "TransformerDecoder", "path": "modules/wenet_extractor/transformer/decoder.py", "snippet": "class TransformerDecoder(torch.nn.Module):\n \"\"\"Base class of Transfomer decoder module.\n Args:\n vocab_size: output dim\n encoder_output_size: dimension of attention\n attention_heads: the number of heads of multi head attention\n linear_units: the hidden units number of position-wise feedforward\n num_blocks: the number of decoder blocks\n dropout_rate: dropout rate\n self_attention_dropout_rate: dropout rate for attention\n input_layer: input layer type\n use_output_layer: whether to use output layer\n pos_enc_class: PositionalEncoding or ScaledPositionalEncoding\n normalize_before:\n True: use layer_norm before each sub-block of a layer.\n False: use layer_norm after each sub-block of a layer.\n src_attention: if false, encoder-decoder cross attention is not\n applied, such as CIF model\n \"\"\"\n\n def __init__(\n self,\n vocab_size: int,\n encoder_output_size: int,\n attention_heads: int = 4,\n linear_units: int = 2048,\n num_blocks: int = 6,\n dropout_rate: float = 0.1,\n positional_dropout_rate: float = 0.1,\n self_attention_dropout_rate: float = 0.0,\n src_attention_dropout_rate: float = 0.0,\n input_layer: str = \"embed\",\n use_output_layer: bool = True,\n normalize_before: bool = True,\n src_attention: bool = True,\n ):\n super().__init__()\n attention_dim = encoder_output_size\n\n if input_layer == \"embed\":\n self.embed = torch.nn.Sequential(\n torch.nn.Embedding(vocab_size, attention_dim),\n PositionalEncoding(attention_dim, positional_dropout_rate),\n )\n elif input_layer == \"none\":\n self.embed = NoPositionalEncoding(attention_dim, positional_dropout_rate)\n else:\n raise ValueError(f\"only 'embed' is supported: {input_layer}\")\n\n self.normalize_before = normalize_before\n self.after_norm = torch.nn.LayerNorm(attention_dim, eps=1e-5)\n self.use_output_layer = use_output_layer\n self.output_layer = torch.nn.Linear(attention_dim, vocab_size)\n self.num_blocks = num_blocks\n self.decoders = torch.nn.ModuleList(\n [\n DecoderLayer(\n attention_dim,\n MultiHeadedAttention(\n attention_heads, attention_dim, self_attention_dropout_rate\n ),\n MultiHeadedAttention(\n attention_heads, attention_dim, src_attention_dropout_rate\n )\n if src_attention\n else None,\n PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),\n dropout_rate,\n normalize_before,\n )\n for _ in range(self.num_blocks)\n ]\n )\n\n def forward(\n self,\n memory: torch.Tensor,\n memory_mask: torch.Tensor,\n ys_in_pad: torch.Tensor,\n ys_in_lens: torch.Tensor,\n r_ys_in_pad: torch.Tensor = torch.empty(0),\n reverse_weight: float = 0.0,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Forward decoder.\n Args:\n memory: encoded memory, float32 (batch, maxlen_in, 
feat)\n memory_mask: encoder memory mask, (batch, 1, maxlen_in)\n ys_in_pad: padded input token ids, int64 (batch, maxlen_out)\n ys_in_lens: input lengths of this batch (batch)\n r_ys_in_pad: not used in transformer decoder, in order to unify api\n with bidirectional decoder\n reverse_weight: not used in transformer decoder, in order to unify\n api with bidirectional decode\n Returns:\n (tuple): tuple containing:\n x: decoded token score before softmax (batch, maxlen_out,\n vocab_size) if use_output_layer is True,\n torch.tensor(0.0), in order to unify api with bidirectional decoder\n olens: (batch, )\n \"\"\"\n tgt = ys_in_pad\n maxlen = tgt.size(1)\n # tgt_mask: (B, 1, L)\n tgt_mask = ~make_pad_mask(ys_in_lens, maxlen).unsqueeze(1)\n tgt_mask = tgt_mask.to(tgt.device)\n # m: (1, L, L)\n m = subsequent_mask(tgt_mask.size(-1), device=tgt_mask.device).unsqueeze(0)\n # tgt_mask: (B, L, L)\n tgt_mask = tgt_mask & m\n x, _ = self.embed(tgt)\n for layer in self.decoders:\n x, tgt_mask, memory, memory_mask = layer(x, tgt_mask, memory, memory_mask)\n if self.normalize_before:\n x = self.after_norm(x)\n if self.use_output_layer:\n x = self.output_layer(x)\n olens = tgt_mask.sum(1)\n return x, torch.tensor(0.0), olens\n\n def forward_one_step(\n self,\n memory: torch.Tensor,\n memory_mask: torch.Tensor,\n tgt: torch.Tensor,\n tgt_mask: torch.Tensor,\n cache: Optional[List[torch.Tensor]] = None,\n ) -> Tuple[torch.Tensor, List[torch.Tensor]]:\n \"\"\"Forward one step.\n This is only used for decoding.\n Args:\n memory: encoded memory, float32 (batch, maxlen_in, feat)\n memory_mask: encoded memory mask, (batch, 1, maxlen_in)\n tgt: input token ids, int64 (batch, maxlen_out)\n tgt_mask: input token mask, (batch, maxlen_out)\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (include 1.2)\n cache: cached output list of (batch, max_time_out-1, size)\n Returns:\n y, cache: NN output value and cache per `self.decoders`.\n y.shape` is (batch, maxlen_out, token)\n \"\"\"\n x, _ = self.embed(tgt)\n new_cache = []\n for i, decoder in enumerate(self.decoders):\n if cache is None:\n c = None\n else:\n c = cache[i]\n x, tgt_mask, memory, memory_mask = decoder(\n x, tgt_mask, memory, memory_mask, cache=c\n )\n new_cache.append(x)\n if self.normalize_before:\n y = self.after_norm(x[:, -1])\n else:\n y = x[:, -1]\n if self.use_output_layer:\n y = torch.log_softmax(self.output_layer(y), dim=-1)\n return y, new_cache" }, { "identifier": "TransformerEncoder", "path": "modules/wenet_extractor/transformer/encoder.py", "snippet": "class TransformerEncoder(BaseEncoder):\n \"\"\"Transformer encoder module.\"\"\"\n\n def __init__(\n self,\n input_size: int,\n output_size: int = 256,\n attention_heads: int = 4,\n linear_units: int = 2048,\n num_blocks: int = 6,\n dropout_rate: float = 0.1,\n positional_dropout_rate: float = 0.1,\n attention_dropout_rate: float = 0.0,\n input_layer: str = \"conv2d\",\n pos_enc_layer_type: str = \"abs_pos\",\n normalize_before: bool = True,\n static_chunk_size: int = 0,\n use_dynamic_chunk: bool = False,\n global_cmvn: torch.nn.Module = None,\n use_dynamic_left_chunk: bool = False,\n ):\n \"\"\"Construct TransformerEncoder\n\n See Encoder for the meaning of each parameter.\n \"\"\"\n super().__init__(\n input_size,\n output_size,\n attention_heads,\n linear_units,\n num_blocks,\n dropout_rate,\n positional_dropout_rate,\n attention_dropout_rate,\n input_layer,\n pos_enc_layer_type,\n normalize_before,\n static_chunk_size,\n use_dynamic_chunk,\n global_cmvn,\n 
use_dynamic_left_chunk,\n )\n self.encoders = torch.nn.ModuleList(\n [\n TransformerEncoderLayer(\n output_size,\n MultiHeadedAttention(\n attention_heads, output_size, attention_dropout_rate\n ),\n PositionwiseFeedForward(output_size, linear_units, dropout_rate),\n dropout_rate,\n normalize_before,\n )\n for _ in range(num_blocks)\n ]\n )" }, { "identifier": "IGNORE_ID", "path": "modules/wenet_extractor/utils/common.py", "snippet": "IGNORE_ID = -1" }, { "identifier": "add_sos_eos", "path": "modules/wenet_extractor/utils/common.py", "snippet": "def add_sos_eos(\n ys_pad: torch.Tensor, sos: int, eos: int, ignore_id: int\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Add <sos> and <eos> labels.\n\n Args:\n ys_pad (torch.Tensor): batch of padded target sequences (B, Lmax)\n sos (int): index of <sos>\n eos (int): index of <eeos>\n ignore_id (int): index of padding\n\n Returns:\n ys_in (torch.Tensor) : (B, Lmax + 1)\n ys_out (torch.Tensor) : (B, Lmax + 1)\n\n Examples:\n >>> sos_id = 10\n >>> eos_id = 11\n >>> ignore_id = -1\n >>> ys_pad\n tensor([[ 1, 2, 3, 4, 5],\n [ 4, 5, 6, -1, -1],\n [ 7, 8, 9, -1, -1]], dtype=torch.int32)\n >>> ys_in,ys_out=add_sos_eos(ys_pad, sos_id , eos_id, ignore_id)\n >>> ys_in\n tensor([[10, 1, 2, 3, 4, 5],\n [10, 4, 5, 6, 11, 11],\n [10, 7, 8, 9, 11, 11]])\n >>> ys_out\n tensor([[ 1, 2, 3, 4, 5, 11],\n [ 4, 5, 6, 11, -1, -1],\n [ 7, 8, 9, 11, -1, -1]])\n \"\"\"\n _sos = torch.tensor(\n [sos], dtype=torch.long, requires_grad=False, device=ys_pad.device\n )\n _eos = torch.tensor(\n [eos], dtype=torch.long, requires_grad=False, device=ys_pad.device\n )\n ys = [y[y != ignore_id] for y in ys_pad] # parse padded ys\n ys_in = [torch.cat([_sos, y], dim=0) for y in ys]\n ys_out = [torch.cat([y, _eos], dim=0) for y in ys]\n return pad_list(ys_in, eos), pad_list(ys_out, ignore_id)" }, { "identifier": "th_accuracy", "path": "modules/wenet_extractor/utils/common.py", "snippet": "def th_accuracy(\n pad_outputs: torch.Tensor, pad_targets: torch.Tensor, ignore_label: int\n) -> float:\n \"\"\"Calculate accuracy.\n\n Args:\n pad_outputs (Tensor): Prediction tensors (B * Lmax, D).\n pad_targets (LongTensor): Target label tensors (B, Lmax).\n ignore_label (int): Ignore label id.\n\n Returns:\n float: Accuracy value (0.0 - 1.0).\n\n \"\"\"\n pad_pred = pad_outputs.view(\n pad_targets.size(0), pad_targets.size(1), pad_outputs.size(1)\n ).argmax(2)\n mask = pad_targets != ignore_label\n numerator = torch.sum(\n pad_pred.masked_select(mask) == pad_targets.masked_select(mask)\n )\n denominator = torch.sum(mask)\n return float(numerator) / float(denominator)" }, { "identifier": "make_pad_mask", "path": "modules/wenet_extractor/utils/mask.py", "snippet": "def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:\n \"\"\"Make mask tensor containing indices of padded part.\n\n See description of make_non_pad_mask.\n\n Args:\n lengths (torch.Tensor): Batch of lengths (B,).\n Returns:\n torch.Tensor: Mask tensor containing indices of padded part.\n\n Examples:\n >>> lengths = [5, 3, 2]\n >>> make_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n \"\"\"\n batch_size = lengths.size(0)\n max_len = max_len if max_len > 0 else lengths.max().item()\n seq_range = torch.arange(0, max_len, dtype=torch.int64, device=lengths.device)\n seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n seq_length_expand = lengths.unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n return mask" } ]
from typing import Dict, Optional, Tuple from modules.wenet_extractor.cif.predictor import MAELoss from modules.wenet_extractor.paraformer.search.beam_search import Hypothesis from modules.wenet_extractor.transformer.asr_model import ASRModel from modules.wenet_extractor.transformer.ctc import CTC from modules.wenet_extractor.transformer.decoder import TransformerDecoder from modules.wenet_extractor.transformer.encoder import TransformerEncoder from modules.wenet_extractor.utils.common import IGNORE_ID, add_sos_eos, th_accuracy from modules.wenet_extractor.utils.mask import make_pad_mask import torch
16401
Non-autoregressive End-to-End Speech Recognition see https://arxiv.org/pdf/2206.08317.pdf """ def __init__( self, vocab_size: int, encoder: TransformerEncoder, decoder: TransformerDecoder, ctc: CTC, predictor, ctc_weight: float = 0.5, predictor_weight: float = 1.0, predictor_bias: int = 0, ignore_id: int = IGNORE_ID, reverse_weight: float = 0.0, lsm_weight: float = 0.0, length_normalized_loss: bool = False, ): assert 0.0 <= ctc_weight <= 1.0, ctc_weight assert 0.0 <= predictor_weight <= 1.0, predictor_weight super().__init__( vocab_size, encoder, decoder, ctc, ctc_weight, ignore_id, reverse_weight, lsm_weight, length_normalized_loss, ) self.predictor = predictor self.predictor_weight = predictor_weight self.predictor_bias = predictor_bias self.criterion_pre = MAELoss(normalize_length=length_normalized_loss) def forward( self, speech: torch.Tensor, speech_lengths: torch.Tensor, text: torch.Tensor, text_lengths: torch.Tensor, ) -> Dict[str, Optional[torch.Tensor]]: """Frontend + Encoder + Decoder + Calc loss Args: speech: (Batch, Length, ...) speech_lengths: (Batch, ) text: (Batch, Length) text_lengths: (Batch,) """ assert text_lengths.dim() == 1, text_lengths.shape # Check that batch_size is unified assert ( speech.shape[0] == speech_lengths.shape[0] == text.shape[0] == text_lengths.shape[0] ), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape) # 1. Encoder encoder_out, encoder_mask = self.encoder(speech, speech_lengths) encoder_out_lens = encoder_mask.squeeze(1).sum(1) # 2a. Attention-decoder branch if self.ctc_weight != 1.0: loss_att, acc_att, loss_pre = self._calc_att_loss( encoder_out, encoder_mask, text, text_lengths ) else: # loss_att = None # loss_pre = None loss_att: torch.Tensor = torch.tensor(0) loss_pre: torch.Tensor = torch.tensor(0) # 2b. CTC branch if self.ctc_weight != 0.0: loss_ctc = self.ctc(encoder_out, encoder_out_lens, text, text_lengths) else: loss_ctc = None if loss_ctc is None: loss = loss_att + self.predictor_weight * loss_pre # elif loss_att is None: elif loss_att == torch.tensor(0): loss = loss_ctc else: loss = ( self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_att + self.predictor_weight * loss_pre ) return { "loss": loss, "loss_att": loss_att, "loss_ctc": loss_ctc, "loss_pre": loss_pre, } def _calc_att_loss( self, encoder_out: torch.Tensor, encoder_mask: torch.Tensor, ys_pad: torch.Tensor, ys_pad_lens: torch.Tensor, ) -> Tuple[torch.Tensor, float, torch.Tensor]: if self.predictor_bias == 1: _, ys_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id) ys_pad_lens = ys_pad_lens + self.predictor_bias pre_acoustic_embeds, pre_token_length, _, pre_peak_index = self.predictor( encoder_out, ys_pad, encoder_mask, ignore_id=self.ignore_id ) # 1. Forward decoder decoder_out, _, _ = self.decoder( encoder_out, encoder_mask, pre_acoustic_embeds, ys_pad_lens ) # 2. Compute attention loss loss_att = self.criterion_att(decoder_out, ys_pad)
# This module is from [WeNet](https://github.com/wenet-e2e/wenet). # ## Citations # ```bibtex # @inproceedings{yao2021wenet, # title={WeNet: Production oriented Streaming and Non-streaming End-to-End Speech Recognition Toolkit}, # author={Yao, Zhuoyuan and Wu, Di and Wang, Xiong and Zhang, Binbin and Yu, Fan and Yang, Chao and Peng, Zhendong and Chen, Xiaoyu and Xie, Lei and Lei, Xin}, # booktitle={Proc. Interspeech}, # year={2021}, # address={Brno, Czech Republic }, # organization={IEEE} # } # @article{zhang2022wenet, # title={WeNet 2.0: More Productive End-to-End Speech Recognition Toolkit}, # author={Zhang, Binbin and Wu, Di and Peng, Zhendong and Song, Xingchen and Yao, Zhuoyuan and Lv, Hang and Xie, Lei and Yang, Chao and Pan, Fuping and Niu, Jianwei}, # journal={arXiv preprint arXiv:2203.15455}, # year={2022} # } # class Paraformer(ASRModel): """Paraformer: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition see https://arxiv.org/pdf/2206.08317.pdf """ def __init__( self, vocab_size: int, encoder: TransformerEncoder, decoder: TransformerDecoder, ctc: CTC, predictor, ctc_weight: float = 0.5, predictor_weight: float = 1.0, predictor_bias: int = 0, ignore_id: int = IGNORE_ID, reverse_weight: float = 0.0, lsm_weight: float = 0.0, length_normalized_loss: bool = False, ): assert 0.0 <= ctc_weight <= 1.0, ctc_weight assert 0.0 <= predictor_weight <= 1.0, predictor_weight super().__init__( vocab_size, encoder, decoder, ctc, ctc_weight, ignore_id, reverse_weight, lsm_weight, length_normalized_loss, ) self.predictor = predictor self.predictor_weight = predictor_weight self.predictor_bias = predictor_bias self.criterion_pre = MAELoss(normalize_length=length_normalized_loss) def forward( self, speech: torch.Tensor, speech_lengths: torch.Tensor, text: torch.Tensor, text_lengths: torch.Tensor, ) -> Dict[str, Optional[torch.Tensor]]: """Frontend + Encoder + Decoder + Calc loss Args: speech: (Batch, Length, ...) speech_lengths: (Batch, ) text: (Batch, Length) text_lengths: (Batch,) """ assert text_lengths.dim() == 1, text_lengths.shape # Check that batch_size is unified assert ( speech.shape[0] == speech_lengths.shape[0] == text.shape[0] == text_lengths.shape[0] ), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape) # 1. Encoder encoder_out, encoder_mask = self.encoder(speech, speech_lengths) encoder_out_lens = encoder_mask.squeeze(1).sum(1) # 2a. Attention-decoder branch if self.ctc_weight != 1.0: loss_att, acc_att, loss_pre = self._calc_att_loss( encoder_out, encoder_mask, text, text_lengths ) else: # loss_att = None # loss_pre = None loss_att: torch.Tensor = torch.tensor(0) loss_pre: torch.Tensor = torch.tensor(0) # 2b. 
CTC branch if self.ctc_weight != 0.0: loss_ctc = self.ctc(encoder_out, encoder_out_lens, text, text_lengths) else: loss_ctc = None if loss_ctc is None: loss = loss_att + self.predictor_weight * loss_pre # elif loss_att is None: elif loss_att == torch.tensor(0): loss = loss_ctc else: loss = ( self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_att + self.predictor_weight * loss_pre ) return { "loss": loss, "loss_att": loss_att, "loss_ctc": loss_ctc, "loss_pre": loss_pre, } def _calc_att_loss( self, encoder_out: torch.Tensor, encoder_mask: torch.Tensor, ys_pad: torch.Tensor, ys_pad_lens: torch.Tensor, ) -> Tuple[torch.Tensor, float, torch.Tensor]: if self.predictor_bias == 1: _, ys_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id) ys_pad_lens = ys_pad_lens + self.predictor_bias pre_acoustic_embeds, pre_token_length, _, pre_peak_index = self.predictor( encoder_out, ys_pad, encoder_mask, ignore_id=self.ignore_id ) # 1. Forward decoder decoder_out, _, _ = self.decoder( encoder_out, encoder_mask, pre_acoustic_embeds, ys_pad_lens ) # 2. Compute attention loss loss_att = self.criterion_att(decoder_out, ys_pad)
acc_att = th_accuracy(
8
2023-11-15 09:19:27+00:00
24k
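Taken together, the row that ends here pairs retrieved cross-file snippets (the context entries, each carrying an `identifier`, `path`, and `snippet`) with the target file's import block and truncated body, and asks for the single line that should come next ("acc_att = th_accuracy(" in this case). The sketch below is only a minimal illustration of how such a row could be assembled into a prompt/target pair; it is not taken from the dataset's own tooling, the `rows.jsonl` file name is hypothetical, and the snippets-then-imports-then-code layout is just one plausible choice.

```python
import json
from typing import Tuple


def build_example(row: dict) -> Tuple[str, str]:
    """Turn one dataset row into a (prompt, target) pair.

    The prompt concatenates the retrieved cross-file snippets with the
    in-file prefix (import block + cropped code); the target is the
    single next line the model is expected to produce.
    """
    snippet_block = "\n\n".join(
        f"# {item['path']}\n{item['snippet']}" for item in row["context"]
    )
    prompt = "\n\n".join(
        [snippet_block, row["import_statement"], row["cropped_code"]]
    )
    return prompt, row["next_line"]


# Usage sketch: rows are assumed to be stored as JSON Lines, one row per line.
if __name__ == "__main__":
    with open("rows.jsonl", "r", encoding="utf-8") as fin:
        for line in fin:
            row = json.loads(line)
            prompt, target = build_example(row)
            print(row["repo_name"], row["file_path"], "->", target)
```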
BobaZooba/xllm
tests/unit/experiments/test_base.py
[ { "identifier": "Config", "path": "src/xllm/core/config.py", "snippet": "class Config:\n \"\"\"\n The `Config` class serves as a comprehensive configuration schema for managing various parameters required during\n the setup and execution of experiments relating to language models, such as training, quantization, and\n optimization.\n\n Write more here:\n - https://github.com/BobaZooba/xllm/blob/main/DOCS.md#config\n - https://github.com/BobaZooba/xllm/blob/main/DOCS.md#detailed-config-explanation\n\n This dataclass is used to encapsulate and standardize the configuration for a diverse range of tasks including\n dataset preparation, tokenizer and model initialization, training, evaluation, and interactions with remote services\n like the Hugging Face Model Hub.\n\n Attributes in this class cover aspects like model name and path, tokenizer settings, dataset paths, training\n strategies, quantization methods, hardware acceleration, logging, output directories, and more. The class provides\n properties with custom logic to resolve specific configurations and validation checks to ensure the environment is\n appropriately set up before proceeding with the workflow.\n\n Customization and flexibility are core to this class, as it provides reasonable defaults while also allowing for\n detailed and scalable configurations catered to advanced tasks such as leveraging LoRA, FSDP, deepspeed stage\n setups, and applying incremental quantization techniques like GPTQ and bits-and-bytes.\n\n Methods within the class include:\n - `check`: Performs checks across various attributes for compatibility and correctness.\n - Property getters such as `correct_tokenizer_name_or_path`, `lora_target_modules`, `dtype`, `deepspeed`, `fsdp`,\n and `lora_model_name_or_path_for_fusing` to fetch calculated or defaulted values based on attribute settings.\n\n Subclassing can be done to extend or modify the functionality of the `Config` class to address specific experimental\n scenarios or customized workflows. 
It is the central piece for orchestrating experimental setups and is intimately\n integrated with the rest of the codebase that operates on top of these configurations.\n\n Attributes:\n\n General Settings:\n - `experiment_key`: An enumeration key to specify the experiment type.\n - `save_safetensors`: A boolean value to indicate whether to use safe serialization for tensors.\n - `max_shard_size`: The maximum shard size when pushing the model to the HuggingFace Hub.\n - `local_rank`: Local rank for distributed training, used for logging and saving.\n - `use_gradient_checkpointing`: If set to `True`, enables gradient checkpointing to reduce memory usage at\n the cost of a slower backward pass.\n - `trainer_key`: An enumeration key to select the trainer using the trainers_registry.\n - `force_fp32`: Forces loading the model in fp32 precision, if set to `True`.\n - `force_fp16`: Forces loading the model in fp16 precision, if set to `True`.\n - `from_gptq`: Indicates if a GPTQ quantized model is being loaded.\n - `huggingface_hub_token`: Token for uploading models to HuggingFace Hub.\n - `deepspeed_stage`: Predefined DeepSpeed stage for optimization.\n - `deepspeed_config_path`: Path to the DeepSpeed config file.\n - `fsdp_strategy`: The strategy to be used for Fully Sharded Data Parallelism (FSDP).\n - `fsdp_offload`: If set to `True`, offloads weights to CPU when using FSDP to save memory.\n - `seed`: Seed for random number generators to ensure reproducibility.\n - `stabilize`: Converts some model weights to fp32 and others to bf16 for stabilization.\n - `path_to_env_file`: Custom path to the .env file for reading environment variables.\n\n Data Preparation:\n - `prepare_dataset`: Flags whether to prepare the dataset during the \"prepare\" stage.\n\n LoRA Fusing:\n - `lora_hub_model_id`: Name of the LoRA model on the hub for fusion.\n - `lora_model_local_path`: Local path to LoRA model to be fused.\n - `fused_model_local_path`: Local path to save the fused model.\n - `fuse_after_training`: If `True`, will fuse the model post-training.\n\n GPTQ Quantization:\n - `quantization_dataset_id`: Dataset ID for GPTQ quantization.\n - `quantization_max_samples`: Maximum number of samples to use during GPTQ quantization.\n - `quantized_model_path`: Path to save the GPTQ quantized model.\n - `quantized_hub_model_id`: Name of the model at the hub post-GPTQ quantization.\n - `quantized_hub_private_repo`: If set to `True`, creates a private repository for the quantized model.\n\n Dataset Related:\n - `dataset_key`: Key to select the dataset from the datasets_registry.\n - `train_local_path_to_data`: Local path to the training data file.\n - `eval_local_path_to_data`: Local path to the evaluation data file.\n - `shuffle`: If `True`, shuffles the training data.\n - `max_eval_samples`: Maximum number of examples to use for evaluation.\n - `add_eval_to_train_if_no_path`: If `True`, adds evaluation data to training if there's no separate eval path.\n\n Tokenizer Settings:\n - `tokenizer_name_or_path`: Name or path to the tokenizer.\n - `tokenizer_use_fast`: If `True`, uses the fast version of the tokenizer.\n - `tokenizer_padding_side`: Sets padding side to 'right' or 'left'.\n\n Data Collator Settings:\n - `collator_key`: Key to select the collator from the collators_registry.\n - `max_length`: Maximum sequence length for the model.\n\n Model Configuration:\n - `model_name_or_path`: Name or path to the model to be used.\n - `push_to_hub_bos_add_bos_token`: Adds BOS token when uploading tokenization 
configuration to the hub.\n - `use_flash_attention_2`: Flags the use of flash attention 2.\n - `trust_remote_code`: If `True`, trusts remote code from the HuggingFace Hub.\n - `device_map`: Device map for placing model layers on specific devices.\n - `prepare_model_for_kbit_training`: If `True`, prepares the model for k-bit training.\n\n BitsAndBytes Integration:\n - `load_in_8bit`: Load the model in 8-bit mode using bitsandbytes.\n - `load_in_4bit`: Load the model in 4-bit mode using bitsandbytes.\n - `llm_int8_threshold`: Threshold for detecting outliers in the model weights.\n - `llm_int8_has_fp16_weight`: If `True`, the model will have fp16 weights.\n - `bnb_4bit_use_double_quant`: If `True`, a second quantization step is used for 4-bit weights.\n - `bnb_4bit_quant_type`: Specifies the quantization type used for 4-bit weights.\n - `bnb_quantize_after_model_init`: Determines when the quantization should occur.\n\n GPTQ Specific Parameters:\n - `gptq_bits`: Number of bits for GPTQ quantization.\n - `gptq_group_size`: Group size for GPTQ quantization.\n - `gptq_disable_exllama`: If `True`, disables ExLlama kernels during GPTQ quantization.\n\n LoRA Specific Parameters:\n - `apply_lora`: If `True`, applies LoRA to the model.\n - `lora_rank`: LoRA rank to define the size of the LoRA matrices.\n - `lora_alpha`: Multiplication factor for the resulting LoRA matrix.\n - `lora_dropout`: Dropout rate for LoRA.\n - `raw_lora_target_modules`: Comma-separated string of module names to apply LoRA, or 'all' to apply broadly.\n\n Training Arguments:\n - `output_dir`: Path to save training outputs.\n - `per_device_train_batch_size`: Batch size per device for training.\n - `do_eval`: If `True`, performs evaluation.\n - `per_device_eval_batch_size`: Batch size per device for evaluation.\n - `gradient_accumulation_steps`: Number of steps to accumulate gradients for larger effective batch size.\n - `eval_accumulation_steps`: Number of steps to accumulate gradients during evaluation.\n - `eval_delay`: Delay before the first evaluation.\n - `eval_steps`: Number of update steps between evaluations.\n - `warmup_steps`: Number of steps for learning rate warmup.\n - `max_steps`: Maximum number of training steps.\n - `num_train_epochs`: Number of epochs for training.\n - `learning_rate`: Learning rate for the optimizer.\n - `max_grad_norm`: Gradient clipping threshold.\n - `weight_decay`: Coefficient for weight decay regularization.\n - `label_smoothing_factor`: Label smoothing factor.\n - `logging_steps`: Number of steps between logging intermediate results.\n - `save_steps`: Number of training steps between checkpoints and model upload.\n - `save_total_limit`: Maximum number of checkpoints to keep.\n - `optim`: Optimizer name, overwritten by DeepSpeed if used.\n - `push_to_hub`: If `True`, model checkpoints are uploaded to HuggingFace Hub.\n - `hub_model_id`: Name of the model on the HuggingFace Hub.\n - `hub_private_repo`: If `True`, creates a private repository on the HuggingFace Hub.\n\n Weights & Biases Integration:\n - `report_to_wandb`: If `True`, logs metrics to Weights & Biases.\n - `wandb_api_key`: API key for Weights & Biases.\n - `wandb_project`: Project name on Weights & Biases.\n - `wandb_entity`: Entity name (user or organization) on Weights & Biases.\n\n Example of creating a `Config` object:\n ```python\n config = Config(\n model_name_or_path='gpt2',\n dataset_key='my_dataset',\n gradient_accumulation_steps=8,\n max_length=512,\n deepspeed_stage=\"3\",\n )\n ```\n\n Note:\n - Throughout the 
codebase, `Config` instances are passed around to provide a unified source of configurations\n for various components.\n - It is crucial to ensure all required settings are properly set in a `Config` object before it is utilized,\n particularly when overriding defaults or specifying custom configurations.\n \"\"\"\n\n # general\n experiment_key: str = field(\n default=enums.Experiments.base,\n metadata={\"help\": \"Experiment class key\"},\n )\n save_safetensors: bool = field(\n default=True,\n metadata={\n \"help\": \"Use safe serialization (safe tensors) or not\",\n },\n )\n max_shard_size: str = field(\n default=\"10GB\", metadata={\"help\": \"max_shard_size for the model pushing to the HuggingFace Hub\"}\n )\n local_rank: int = field(\n default=0,\n metadata={\n \"help\": \"Local rank for logging and saving. Works only in distributed training\",\n },\n )\n use_gradient_checkpointing: bool = field(\n default=False,\n metadata={\n \"help\": \"If True, use gradient checkpointing to save memory at the expense of slower backward pass\",\n },\n )\n trainer_key: str = field(\n default=enums.Trainers.lm,\n metadata={\n \"help\": \"Key of the trainer for loading from trainers_registry\",\n },\n )\n force_fp32: bool = field(\n default=False,\n metadata={\n \"help\": \"Force using fp32 when model loading\",\n },\n )\n force_fp16: bool = field(\n default=False,\n metadata={\n \"help\": \"Force using fp16 when model loading\",\n },\n )\n from_gptq: bool = field(\n default=False,\n metadata={\n \"help\": \"If you loadining GPTQ quantized model\",\n },\n )\n huggingface_hub_token: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"HuggingFace Hub token. You can also set this key using .env file\",\n },\n )\n single_gpu: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Indicates that you are learning on the same GPU. It is necessary to use DeepSpeed on a single GPU\",\n },\n )\n master_port: int = field(\n default=9994,\n metadata={\n \"help\": \"Master port for running DeepSpeed on a single GPU. Modify if RuntimeError: Address already in use\",\n },\n )\n deepspeed_stage: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Predifined DeepSpeed stage\",\n },\n )\n deepspeed_config_path: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Path to DeepSpeed config\",\n },\n )\n fsdp_strategy: str = field(\n default=\"\",\n metadata={\n \"help\": \"FSDP strategy\",\n },\n )\n fsdp_offload: bool = field(\n default=True,\n metadata={\n \"help\": \"Offload weights when using FSDP\",\n },\n )\n seed: int = field(\n default=42,\n metadata={\n \"help\": \"Seed value for random operations\",\n },\n )\n stabilize: bool = field(\n default=False,\n metadata={\n \"help\": \"Stabilize the model. Convert some weights (e.g. LoRA) to bf16\",\n },\n )\n norm_fp32: bool = field(\n default=False,\n metadata={\n \"help\": \"Convert norm to fp32\",\n },\n )\n path_to_env_file: Optional[str] = field(\n default=\"./.env\",\n metadata={\"help\": \"Custom path to .env file\"},\n )\n\n # prepare\n prepare_dataset: bool = field(\n default=True,\n metadata={\n \"help\": 'Prepare the dataset. Works only at \"prepare\" stage',\n },\n )\n\n # fuse\n lora_hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Fusing LoRA. The name of the LoRA model at the hub for fusing. Example: BobaZooba/Shurale\",\n },\n )\n lora_model_local_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Fusing LoRA. 
Local path to the LoRA model\",\n },\n )\n fused_model_local_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Local path to fused model. Useful if you want to quantize model after fusing on the same machine\",\n },\n )\n fuse_after_training: bool = field(\n default=False,\n metadata={\n \"help\": \"Fuse or not model after training\",\n },\n )\n\n # gptq quantization\n quantization_dataset_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Dataset id for GPTQ quantization. You can install either the idi dataset, or use any dataset\",\n },\n )\n quantization_max_samples: int = field(\n default=1024,\n metadata={\n \"help\": \"Max samples for GPTQ quantization if you use custom dataset\",\n },\n )\n quantized_model_path: str = field(\n default=\"./quantized_model/\",\n metadata={\n \"help\": \"Path to GPTQ quantized model\",\n },\n )\n quantized_hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The name of the model at the hub for GPTQ quantization. Example: BobaZooba/Shurale-GPTQ\",\n },\n )\n quantized_hub_private_repo: bool = field(\n default=True,\n metadata={\n \"help\": \"Private repository for GPTQ quantization model or not\",\n },\n )\n\n # dataset\n dataset_key: str = field(\n default=enums.Datasets.soda,\n metadata={\n \"help\": \"Key of the dataset for loading from datasets_registry\",\n },\n )\n train_local_path_to_data: str = field(\n default=\"./train.jsonl\",\n metadata={\n \"help\": \"The path to the local training data file\",\n },\n )\n eval_local_path_to_data: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The path to the local eval data file\",\n },\n )\n shuffle: bool = field(\n default=True,\n metadata={\n \"help\": \"Shuffle training data\",\n },\n )\n max_eval_samples: int = field(\n default=1_000,\n metadata={\n \"help\": \"Maximum number of examples for evaluation\",\n },\n )\n add_eval_to_train_if_no_path: bool = field(\n default=False,\n metadata={\n \"help\": \"Add evaluation data to training data if their number is greater than max_eval_samples\",\n },\n )\n\n # tokenizer\n tokenizer_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Tokenizer name or path. If the value is not set, \"\n \"then it will be taken from the model_name_or_path\",\n },\n )\n tokenizer_use_fast: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Use fast flag for the tokenizer\",\n },\n )\n tokenizer_padding_side: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Padding side of the collator: None, right or left\",\n },\n )\n\n # collator\n collator_key: str = field(\n default=enums.Collators.lm,\n metadata={\n \"help\": \"Key of the collator for loading from collators_registry\",\n },\n )\n max_length: int = field(\n default=2048,\n metadata={\n \"help\": \"Max sequence length of the model\",\n },\n )\n\n # model\n model_name_or_path: str = field(\n default=\"mistralai/Mistral-7B-v0.1\",\n metadata={\n \"help\": \"Model name or path. It could be from HuggingFace or locally\",\n },\n )\n push_to_hub_bos_add_bos_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Upload to the hub tokenization config with add_bos_token equals to True. Might be helpful for TGI\"\n },\n )\n use_flash_attention_2: bool = field(\n default=False,\n metadata={\n \"help\": \"Use or not flash attention 2. 
Requires 1) CUDA >= 11.6; 2) install flash-attn 3) compatible model\",\n },\n )\n trust_remote_code: bool = field(\n default=False,\n metadata={\n \"help\": \"Trust remote code from HuggingFace\",\n },\n )\n device_map: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Device map for loading the model\",\n },\n )\n prepare_model_for_kbit_training: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Prepare or not for kbit training\",\n },\n )\n offload_folder: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Offloading folder. Helps for fusing in colab\",\n },\n )\n\n # bitsandbytes\n load_in_8bit: bool = field(\n default=False,\n metadata={\n \"help\": \"Load the model in 8 bit using bitsandbytes\",\n },\n )\n load_in_4bit: bool = field(\n default=False,\n metadata={\n \"help\": \"Load the model in 4 bit using bitsandbytes\",\n },\n )\n llm_int8_threshold: float = field(\n default=6.0,\n metadata={\n \"help\": \"Threshold for outlier detection\",\n },\n )\n llm_int8_has_fp16_weight: bool = field(\n default=True,\n metadata={\n \"help\": \"LLM has weights in fp16\",\n },\n )\n bnb_4bit_use_double_quant: bool = field(\n default=True,\n metadata={\n \"help\": \"Double quantization. \"\n \"This will enable a second quantization after the first \"\n \"one to save an additional 0.4 bits per parameter\",\n },\n )\n bnb_4bit_quant_type: str = field(\n default=\"nf4\",\n metadata={\n \"help\": \"Quantization type for 4 bit\",\n },\n )\n bnb_quantize_after_model_init: bool = field(\n default=False, metadata={\"help\": \"If False, quantization will be at model init\"}\n )\n\n # gptq\n gptq_bits: int = field(\n default=4,\n metadata={\n \"help\": \"Bits for GPTQ quantization\",\n },\n )\n gptq_group_size: int = field(\n default=128,\n metadata={\n \"help\": \"Group size for GPTQ quantization\",\n },\n )\n gptq_disable_exllama: bool = field(\n default=True,\n metadata={\n \"help\": \"Disable ExLlama kernels for GPTQ quantization\",\n },\n )\n\n # lora\n apply_lora: bool = field(\n default=False,\n metadata={\n \"help\": \"Apply LoRA to the model or not\",\n },\n )\n lora_rank: int = field(\n default=8,\n metadata={\n \"help\": \"LoRA rank value. LoRA matrices W_A x R and R x W_B, where R is LoRA rank\",\n },\n )\n lora_alpha: int = field(\n default=32,\n metadata={\n \"help\": \"LoRA alpha value. The resulting LoRA matrix will be multiplied by this value\",\n },\n )\n lora_dropout: float = field(\n default=0.1,\n metadata={\n \"help\": \"LoRA dropout value\",\n },\n )\n raw_lora_target_modules: str = field(\n default=\"all\",\n metadata={\n \"help\": 'Names of modules to apply LoRA. A comma-separated string, for example: \"k,q,v\". '\n 'When setting the value \"all\", LoRA will be applied to all linear layers, except for the '\n \"input embeddings and the lm_head.\",\n },\n )\n\n # training arguments\n output_dir: str = field(\n default=\"./outputs/\",\n metadata={\n \"help\": \"The path to the directory where the artifacts will be saved\",\n },\n )\n per_device_train_batch_size: int = field(\n default=2,\n metadata={\n \"help\": \"Batch size on each GPU\",\n },\n )\n do_eval: bool = field(\n default=False,\n metadata={\n \"help\": \"Run eval or not\",\n },\n )\n per_device_eval_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Batch size on each GPU for evaluation. 
\"\n \"If None per_device_eval_batch_size equals to per_device_train_batch_size\",\n },\n )\n gradient_accumulation_steps: int = field(\n default=1,\n metadata={\n \"help\": \"Number of steps to accumulate gradients\",\n },\n )\n eval_accumulation_steps: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Number of steps to accumulate gradients at evaluation.\"\n \"If None eval_accumulation_steps equals to gradient_accumulation_steps\",\n },\n )\n eval_delay: int = field(\n default=0,\n metadata={\n \"help\": \"Number of epochs or steps to wait for before the first \"\n \"evaluation can be performed, depending on the evaluation_strategy\"\n },\n )\n eval_steps: Optional[int] = field(\n default=1_000, metadata={\"help\": \"Number of update steps between two evaluations\"}\n )\n warmup_steps: int = field(\n default=1_000,\n metadata={\n \"help\": \"Number of steps to warm up\",\n },\n )\n max_steps: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Maximum number of training steps\",\n },\n )\n num_train_epochs: int = field(\n default=1,\n metadata={\n \"help\": \"Number of training epochs\",\n },\n )\n learning_rate: float = field(\n default=2e-4,\n metadata={\n \"help\": \"Learning rate value\",\n },\n )\n max_grad_norm: float = field(\n default=1.0,\n metadata={\n \"help\": \"Clip grad value\",\n },\n )\n weight_decay: float = field(\n default=0.001,\n metadata={\n \"help\": \"Weight decay value\",\n },\n )\n label_smoothing_factor: float = field(\n default=0.0,\n metadata={\n \"help\": \"Label smoothing value\",\n },\n )\n logging_steps: int = field(\n default=10,\n metadata={\n \"help\": \"Number of steps between logging\",\n },\n )\n save_steps: int = field(\n default=100,\n metadata={\n \"help\": \"The number of training steps between saving the checkpoint and uploading to the hub\",\n },\n )\n save_total_limit: int = field(\n default=1,\n metadata={\n \"help\": \"The number of checkpoints that are saved locally\",\n },\n )\n optim: Optional[str] = field(\n default=\"paged_adamw_8bit\",\n metadata={\n \"help\": \"Optimizer name. It will be overwritten if you use deepspeed\",\n },\n )\n push_to_hub: bool = field(\n default=False,\n metadata={\n \"help\": \"Upload the model to the hub. \"\n \"The model will be uploaded to the hub every save_steps. \"\n \"If LoRA is used, then LoRA's weights will be loaded onto the hub\",\n },\n )\n hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The name of the model at the hub. Example: BobaZooba/Shurale\",\n },\n )\n hub_private_repo: bool = field(\n default=True,\n metadata={\n \"help\": \"Private repository or not\",\n },\n )\n neftune_noise_alpha: Optional[float] = field(\n default=None,\n metadata={\n \"help\": \"If not None, this will activate NEFTune noise embeddings. \"\n \"This can drastically improve model performance for instruction fine-tuning\",\n },\n )\n\n # training traction\n project_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Project name for training traction services like W&B\",\n },\n )\n report_to_wandb: bool = field(\n default=False,\n metadata={\n \"help\": \"Report or not to Weight & Biases\",\n },\n )\n wandb_api_key: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Weight & Biases API key. You can also set this key using .env file\",\n },\n )\n wandb_project: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Depreacted, use project_name. 
Weight & Biases project name\",\n },\n )\n wandb_entity: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Weight & Biases entity name (user or company)\",\n },\n )\n\n def __post_init__(self):\n if self.huggingface_hub_token is not None:\n os.environ[enums.EnvironmentVariables.huggingface_hub_token] = self.huggingface_hub_token\n dist_logger(message=f\"Environment variable {enums.EnvironmentVariables.huggingface_hub_token} set\")\n\n if self.report_to_wandb:\n for key, value in zip(\n [\n enums.EnvironmentVariables.wandb_api_key,\n enums.EnvironmentVariables.wandb_project,\n enums.EnvironmentVariables.wandb_entity,\n ],\n [\n self.wandb_api_key,\n self.correct_project_name,\n self.wandb_entity,\n ],\n ):\n if value is not None:\n os.environ[key] = value\n dist_logger(message=f\"Environment variable {key} set\")\n else:\n os.environ[enums.EnvironmentVariables.wandb_disabled] = \"true\"\n\n @property\n def correct_project_name(self) -> Optional[str]:\n if self.project_name is not None and self.wandb_project is not None:\n dist_logger.warning(\n message=\"You set both project_name and wandb_project.\"\n \"Priority set to project_name for experiment tracking\"\n )\n return self.project_name\n elif self.project_name is not None:\n return self.project_name\n elif self.wandb_project is not None:\n dist_logger.warning(message=\"wandb_project is depreacted, please use project_name instead\")\n return self.wandb_project\n else:\n return None\n\n def check_hub(self) -> None:\n if self.push_to_hub and self.hub_model_id is None:\n raise ValueError(\"You want to push to HF hub, but hub_model_id is None\")\n elif self.hub_model_id is not None and not self.push_to_hub:\n dist_logger.warning(\"You set hub_model_id, but push_to_hub is False\")\n\n return None\n\n def apply_deepspeed_single_gpu(self) -> None:\n os.environ[enums.EnvironmentVariables.master_address] = \"localhost\"\n os.environ[enums.EnvironmentVariables.master_port] = str(self.master_port)\n os.environ[enums.EnvironmentVariables.rank] = \"0\"\n os.environ[enums.EnvironmentVariables.local_rank] = \"0\"\n os.environ[enums.EnvironmentVariables.world_size] = \"1\"\n\n def check_deepspeed(self) -> None:\n if self.deepspeed is not None:\n spec = find_spec(\"deepspeed\")\n\n if spec is None:\n raise ImportError(\"Deepspeed is not None, but failed to import deepspeed. Please install deepspeed.\")\n\n if self.single_gpu:\n self.apply_deepspeed_single_gpu()\n\n return None\n\n def check_flash_attention(self) -> None:\n if self.use_flash_attention_2:\n if not torch.cuda.is_available():\n raise ImportError(\"You want to use flash_attention_2, but CUDA is not available\")\n\n spec = find_spec(\"flash_attn\")\n\n if spec is None:\n raise ImportError(\n \"You want to use flash_attention_2, but flash-attn is not installed. Please install flash-attn.\"\n )\n\n return None\n\n def check_auto_gptq(self) -> None:\n spec = find_spec(\"auto_gptq\")\n\n if spec is None:\n raise ImportError(\n \"You want to quantize model using GPTQ, but auto-gptq is not installed. 
Please install auto-gptq.\"\n )\n\n return None\n\n def check(self) -> None:\n \"\"\"\n Performs a series of checks to validate the configuration for compatibility with the training environment.\n\n This method is responsible for ensuring that the environment is properly set up for the actions specified in\n the configuration object, such as pushing to Hugging Face's hub, using deepspeed, and using flash attention.\n\n It includes the following checks:\n - Verifies that credentials for Hugging Face hub are provided if the model is intended to be pushed to the hub.\n - Validates that deepspeed is installed if it is specified in the configuration.\n - Ensures that the necessary packages are installed for using flash attention if configured to do so.\n\n Does not return any value.\n\n Raises:\n - ValueError: If the configuration for hub interaction is incorrect.\n - ImportError: If any of the required libraries (e.g., deepspeed, flash-attn, auto-gptq) are not installed.\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(...)\n # Before proceeding with training or other operations, run checks to ensure environment compatibility.\n config.check()\n ```\n\n Note:\n - Always invoke this method after initializing a `Config` object and before proceeding with model training\n or other operations that rely on the configuration settings.\n \"\"\"\n self.check_hub()\n self.check_deepspeed()\n self.check_flash_attention()\n\n return None\n\n @property\n def correct_tokenizer_name_or_path(self) -> str:\n \"\"\"\n Resolves the tokenizer name or path to be used for initializing the tokenizer.\n\n This property ensures that if a specific tokenizer name or path is not provided in the configuration object,\n the model name or path is used instead, maintaining consistency between model and tokenizer.\n\n Returns:\n `str`: The name or path of the tokenizer to be used. If `tokenizer_name_or_path` is specified in `Config`\n object, that value is used. Otherwise, `model_name_or_path` is returned as the default tokenizer identifier.\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(model_name_or_path=\"gpt2\", tokenizer_name_or_path=None)\n tokenizer_name_or_path = config.correct_tokenizer_name_or_path\n # tokenizer_name_or_path now holds the value \"gpt2\"\n ```\n\n Note:\n - It is a common practice to use the same identifier for both the model and its corresponding tokenizer.\n This property handles such a case automatically when the `tokenizer_name_or_path` is not explicitly set.\n \"\"\"\n if self.tokenizer_name_or_path is not None:\n return self.tokenizer_name_or_path\n else:\n return self.model_name_or_path\n\n @property\n def lora_target_modules(self) -> Optional[List[str]]:\n \"\"\"\n Interprets the LoRA target modules setting from the configuration to determine which model modules to apply\n LoRA to.\n\n LoRA (Low-Rank Adaptation) is a parameter-efficient training method that modifies specific layers within a\n model. 
This property is responsible for parsing the `raw_lora_target_modules` configuration to identify\n the specific modules (like attention key, query, and value matrices) that LoRA will be applied to.\n\n Returns:\n Optional[List[str]]: A list of module names to apply LoRA to if specified, otherwise `None` if LoRA should\n be applied to all eligible modules as determined by the string \"all\" in `raw_lora_target_modules`.\n\n Raises:\n ValueError: If `raw_lora_target_modules` is not set.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a Config object with LoRA targets specified.\n config = Config(raw_lora_target_modules=\"k,q,v\")\n lora_modules = config.lora_target_modules\n # lora_modules now holds the list ['k', 'q', 'v'].\n ```\n\n Note:\n - The `raw_lora_target_modules` should be provided as a comma-separated string specifying the target\n modules. If LoRA should be applied broadly, the value \"all\" can be used.\n \"\"\"\n if self.raw_lora_target_modules == \"all\":\n return None\n elif self.raw_lora_target_modules is not None:\n modules_names = [module_name.strip() for module_name in self.raw_lora_target_modules.split(\",\")]\n return modules_names\n else:\n raise ValueError(\"raw_lora_target_modules doesn't set\")\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Determines the appropriate PyTorch data type for the model based on availability of CUDA and configuration\n settings.\n\n This property assists in setting computational precision for training and inference (e.g., FP32, FP16, BF16),\n basing the decision on system capabilities and user preferences as defined in the `Config` object. The selected\n data type can impact both the computational efficiency and memory usage of the model operations.\n\n Returns:\n `torch.dtype`: The data type to be used for the model tensors. This can be one of the following based on the\n system's CUDA support and configuration flags: `torch.float32` (FP32), `torch.float16` (FP16), or\n `torch.bfloat16` (BF16).\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(force_fp32=False, force_fp16=True)\n model_dtype = config.dtype\n # If CUDA is available and BF16 is supported, model_dtype will be `torch.bfloat16`.\n # Otherwise, it falls back to `torch.float16` due to the forced FP16 configuration.\n ```\n\n Note:\n - This property plays a critical role in memory management and computational efficiency, especially when\n working with large models or limited system resources.\n \"\"\"\n if not torch.cuda.is_available() or self.force_fp32:\n return torch.float32\n elif self.force_fp16:\n return torch.float16\n elif torch.cuda.is_bf16_supported():\n return torch.bfloat16\n else:\n return torch.float16\n\n @property\n def deepspeed(self) -> Optional[Dict[str, Any]]:\n \"\"\"\n Retrieves the deepspeed configuration dictionary based on settings within the `Config` object.\n\n This property parses the deepspeed settings from the configuration to construct the configuration dictionary\n used for ing up deepspeed in the model's training environment. 
It determines whether a predefined stage\n or a custom configuration file path should be utilized.\n\n Returns:\n `Optional[Dict[str, Any]]`: A dictionary containing deepspeed configurations, or `None` if deepspeed is not\n to be used.\n\n Raises:\n ValueError: If the `deepspeed_stage` specified does not correspond to a known configuration,\n or if a custom deepspeed configuration file path does not exist.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with deepspeed specifications.\n config = Config(deepspeed_stage=\"2\")\n ds_config = config.deepspeed\n # ds_config now contains the deepspeed configuration for stage 2.\n ```\n\n Note:\n - A deepspeed stage is a set of predefined configurations. If this is set, the corresponding configuration\n will be used and any custom deepspeed configuration file will be ignored.\n - If a custom deepspeed configuration file path is given and it exists, that configuration will be loaded\n and used.\n \"\"\"\n deepspeed_config: Optional[Dict[str, Any]] = None\n\n if self.deepspeed_config_path is not None:\n if os.path.isfile(self.deepspeed_config_path):\n with open(self.deepspeed_config_path) as file_object:\n deepspeed_config = json.load(file_object)\n return deepspeed_config\n else:\n raise ValueError(f\"deepspeed_config_path set to {self.deepspeed_config_path}, but not found\")\n\n if self.deepspeed_stage in [0, \"0\", \"stage_0\"]:\n return None\n\n if self.deepspeed_stage is not None:\n deepspeed_config = DS_CONFIG_MAPPER.get(self.deepspeed_stage, None)\n if deepspeed_config is None:\n raise ValueError(\n f'Deepspeed stage \"{self.deepspeed_stage}\" not found in keys: {list(DS_CONFIG_MAPPER.keys())}'\n )\n\n return deepspeed_config\n\n @property\n def fsdp(self) -> Union[str, List[str]]:\n \"\"\"\n Compiles the configurations for Fully Sharded Data Parallel (FSDP) based on the settings in the `Config` object.\n\n This property creates a list containing FSDP-related options, which informs the training process whether to\n enable FSDP and which FSDP strategy to employ.\n\n A list of options (fsdp_strategy) along the following:\n \"full_shard\": Shard parameters, gradients and optimizer states.\n \"shard_grad_op\": Shard optimizer states and gradients.\n \"offload\": Offload parameters and gradients to CPUs (only compatible with \"full_shard\" and \"shard_grad_op\").\n \"auto_wrap\": Automatically recursively wrap layers with FSDP using default_auto_wrap_policy.\n\n Returns:\n `Union[str, List[str]]`: A list of FSDP options as strings. 
It can be an empty string if FSDP is not used or\n a list with the specified FSDP strategy and options such as offloading.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with FSDP specifications.\n config = Config(fsdp_strategy=\"full_shard\", fsdp_offload=True)\n fsdp_options = config.fsdp\n ```\n\n Note:\n - FSDP strategies and options improve memory efficiency during distributed training by sharding the model's\n parameters across multiple devices.\n - The FSDP settings in the configuration should match the target training environment and system\n capabilities.\n \"\"\"\n fsdp_options = list()\n\n if self.fsdp_strategy is not None and self.fsdp_strategy != \"\":\n fsdp_options.append(self.fsdp_strategy)\n else:\n return \"\"\n\n if self.fsdp_offload:\n fsdp_options.append(FSDPOption.OFFLOAD)\n\n return fsdp_options\n\n @property\n def lora_model_name_or_path_for_fusing(self) -> str:\n \"\"\"\n Determines the name or path of the LoRA model to be used for the fusing process.\n\n This property resolves which model should be fused by checking whether a model ID from the Hugging Face hub or a\n local path to a LoRA model is provided in the configuration object. It is essential for the fusing operation\n when LoRA weights need to be integrated into the base model.\n\n Returns:\n `str`: The Hugging Face hub model ID or the local file path to the LoRA model, depending on which is\n specified.\n\n Raises:\n ValueError: If neither `lora_hub_model_id` nor `lora_model_local_path` is set, indicating that there is no\n model specified for fusing.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a Config object with a specified LoRA model on Hugging Face Hub or locally.\n config = Config(lora_hub_model_id=\"username/model-id\", lora_model_local_path=None)\n model_name_or_path = config.lora_model_name_or_path_for_fusing\n # model_name_or_path will hold the value \"username/model-id\".\n ```\n\n Note:\n - This property is specifically used during the model fusing step and should be configured correctly in\n scenarios where LoRA is utilized.\n \"\"\"\n if self.lora_hub_model_id is not None:\n return self.lora_hub_model_id\n elif self.lora_model_local_path is not None:\n return self.lora_model_local_path\n else:\n raise ValueError(\"Please set lora_hub_model_id or lora_model_local_path for fusing\")\n\n @property\n def need_to_prepare_model_for_kbit_training(self) -> bool:\n if self.prepare_model_for_kbit_training is not None:\n return self.prepare_model_for_kbit_training\n else:\n return self.from_gptq or self.load_in_4bit or self.load_in_8bit" }, { "identifier": "Experiment", "path": "src/xllm/experiments/base.py", "snippet": "class Experiment:\n \"\"\"\n The Experiment class orchestrates the setup, execution, and management of the training process for LLM's.\n It encapsulates the creation of datasets, models, tokenizers, collators, and trainers, alongside handling their\n respective configurations and ensuring compatibility across components. 
The class provides an integrated environment\n to apply model quantization, Low-Rank Adaptation (LoRA), perform training, evaluation, fusing LoRA\n and pushing the results to the Hugging Face Hub.\n\n The class provides methods for various stages of the experiment lifecycle:\n\n - `__init__`: Initializes the experiment with user-provided or default components and configurations.\n - `build`: Constructs all necessary components, setting up the environment for the training process.\n - `run`: Executes the training according to the configuration and prebuilt components, handles post-training\n activities, and optionally fuses LoRA parameters.\n - `push_to_hub`: Uploads the model and tokenizer to the Hugging Face Hub for sharing and deployment.\n - `fuse_lora`: Integrates LoRA parameters for streamlined model deployment if LoRA is applied during training.\n\n Throughout the experiment life cycle, several hooks (`before_*` and `after_*` methods) are provided for users\n to inject custom logic or steps into the process at defined points.\n\n The Experiment class is designed to be flexible and extensible, allowing users to customize the experiment\n by providing their implementations of datasets, models, collators, and trainers or relying on the defaults\n determined by the given configuration parameters.\n\n By handling the intricate setup and ensuring all components work harmoniously, the Experiment class provides\n a structured approach to training language models, thereby simplifying the process for users.\n\n Attributes:\n config (`Config`): Holds the entire configuration for the experiment, including model, dataset,\n and training parameters.\n training_arguments (`Optional[TrainingArguments]`): Encapsulates arguments for training,\n such as batch size, learning rate, and saving preferences.\n train_dataset (`Optional[BaseDataset]`): The dataset for training the model.\n eval_dataset (`Optional[BaseDataset]`): The dataset for evaluating the model.\n tokenizer (`Optional[PreTrainedTokenizer]`): Processes text data for model input.\n collator (`Optional[BaseCollator]`): Prepares batches of data for the model.\n quantization_config (`Union[BitsAndBytesConfig, GPTQConfig, None]`): Settings for model quantization\n to reduce size and improve speed.\n model (`Union[PreTrainedModel, PeftModel, None]`): The actual model object that will be trained.\n lora_config (`Optional[LoraConfig]`): Configuration for LoRA.\n trainer (`Optional[LMTrainer]`): Manages and executes the training process.\n\n The class requires at least a configuration object to be passed during initialization, while other components\n can be optionally specified and will otherwise be built based on the provided configuration.\n \"\"\"\n\n def __init__(\n self,\n config: Config,\n training_arguments: Optional[TrainingArguments] = None,\n train_dataset: Optional[BaseDataset] = None,\n eval_dataset: Optional[BaseDataset] = None,\n tokenizer: Optional[PreTrainedTokenizer] = None,\n collator: Optional[BaseCollator] = None,\n quantization_config: Union[BitsAndBytesConfig, GPTQConfig, None] = None,\n model: Union[PreTrainedModel, PeftModel, None] = None,\n lora_config: Optional[LoraConfig] = None,\n trainer: Optional[LMTrainer] = None,\n ):\n \"\"\"\n Initializes an experiment environment to set up and execute the training process of language models.\n\n Args:\n config (`Config`):\n A configuration object containing all necessary parameters for the experiment such as model details,\n dataset paths, training hyperparameters, etc.\n 
training_arguments (`Optional[TrainingArguments]`, defaults to `None`):\n Arguments relevant to the training process such as batch size, learning rate, number of epochs,\n and the device to be used for training. If `None`, it will be built based on `config`.\n train_dataset (`Optional[BaseDataset]`, defaults to `None`):\n The dataset to be used for training the model. If `None`, it will be constructed from the details\n present in `config`.\n eval_dataset (`Optional[BaseDataset]`, defaults to `None`):\n The dataset to be used for evaluating the model. It's built only if required and `None` is provided.\n tokenizer (`Optional[PreTrainedTokenizer]`, defaults to `None`):\n The tokenizer instance for text preprocessing. If `None`, it will be built based on `config`.\n collator (`Optional[BaseCollator]`, defaults to `None`):\n A collator instance that prepares data batches for input into the model. If `None`, it will be built\n based on `config`.\n quantization_config (`Union[BitsAndBytesConfig, GPTQConfig, None]`, defaults to `None`):\n Configuration object for model quantization, which can help reduce model size and improve\n inference speed. If not provided, and quantization is desired, it will be built based on `config`.\n model (`Union[PreTrainedModel, PeftModel, None]`, defaults to `None`):\n The model that will undergo training. If `None`, it will be built using the provided `config`.\n lora_config (`Optional[LoraConfig]`, defaults to `None`):\n Configuration for applying Low-Rank Adaptation (LoRA) to enhance the model's capabilities with\n minimal parameter increase. If LoRA is desired and `lora_config` is `None`, it will be constructed.\n trainer (`Optional[LMTrainer]`, defaults to `None`):\n The trainer instance responsible for managing the model's training process. If `None`, it will\n be built considering the other provided components and the `config`.\n\n The constructor method sets up the `Experiment` with the necessary components for training, creating\n default instances for any component not provided. It also includes checks to ensure provided components\n are compatible and satisfies internal conditions for training to proceed.\n \"\"\"\n self.config = config\n\n self.training_arguments = training_arguments\n self.train_dataset = train_dataset\n self.eval_dataset = eval_dataset\n self.tokenizer = tokenizer\n self.collator = collator\n self.quantization_config = quantization_config\n self.model = model\n self.lora_config = lora_config\n self.trainer = trainer\n\n self.internal_checks()\n\n def internal_checks(self) -> None:\n if self.tokenizer is not None and self.collator is not None and self.tokenizer != self.collator.tokenizer:\n dist_logger.warning(\"Tokenizer not equals to tokenizer in collator\")\n\n return None\n\n def internal_checks_before_train(self) -> None:\n if self.training_arguments is not None and self.training_arguments.do_eval and self.eval_dataset is None:\n raise ValueError(\n f\"You set do_eval at config to {self.config.do_eval}, \"\n \"but experiment can't run eval, because eval_dataset is None. \"\n f\"config.eval_local_path_to_data: {self.config.eval_local_path_to_data}\"\n )\n\n def build(self):\n \"\"\"\n Constructs the various components required for running the experiment, including datasets, tokenizer, collator,\n model, and trainer.\n\n This method handles the sequential construction and initialization of components, ensuring that each is\n configured correctly and ready for the training process. 
If any components have not been externally provided\n during initialization, they will be built using the configuration parameters from the `Config` object.\n\n Following the build sequence, if any component is not initialized, the method builds them as follows:\n\n - It checks for the presence of a `TrainingArguments` instance and builds it if necessary, setting training\n parameters such as directories, logging, and device usage.\n\n - If a training dataset is not provided, the method builds it using the training data location specified in\n the configuration object.\n\n - An evaluation dataset is built similarly if evaluation is required and no dataset was provided.\n\n - A tokenizer is built to process the text data for training and evaluation if not already supplied.\n\n - The data collator, which prepares model inputs, is built if absent.\n\n - The model quantization configuration is built if quantization is desired and no configuration was supplied.\n\n - The actual model to be trained is built using the model details from the configuration object if not already\n provided.\n\n - If quantization is requested through `BitsAndBytesConfig` and deferred until after model initialization, it\n is applied now.\n\n - If Low-Rank Adaptation (LoRA) is configured to be applied, the corresponding adjustments are made\n to the model.\n\n - If the model requires stabilization before training, it is stabilized.\n\n - Finally, the trainer is built, which encapsulates the training logic and handles the execution of\n the training process.\n\n Each step includes pre- and post-construction hooks that allow for custom operations before and after\n building each component. Additionally, the method includes checks to validate the correct assembly and setup\n of the entire experiment before proceeding with the training.\n\n After the build process completes, an internal consistency check is performed to ensure that all components\n are compatible and the experiment is ready to run.\n \"\"\"\n\n dist_logger(\"Experiment building has started\")\n self.at_beginning()\n self.save_config()\n dist_logger.info(\"Config saved\")\n\n self.before_checks()\n self.checks()\n dist_logger(\"Checks passed successfully\")\n self.after_checks()\n\n if self.training_arguments is None:\n self.before_training_arguments_build()\n self.training_arguments = self.build_training_arguments()\n dist_logger(f\"Training arguments was built:\\n{self.training_arguments.to_json_string()}\")\n self.after_training_arguments_build()\n\n if self.train_dataset is None:\n self.before_train_dataset_build()\n self.train_dataset = self.build_train_dataset()\n dist_logger(\n f\"Train dataset {self.train_dataset.__class__.__name__} was built. Size: {len(self.train_dataset)}\"\n )\n self.after_train_dataset_build()\n\n if self.eval_dataset is None:\n self.before_eval_dataset_build()\n self.eval_dataset = self.build_eval_dataset()\n if self.eval_dataset is not None:\n if self.training_arguments is not None:\n self.training_arguments.do_eval = True\n if self.eval_dataset is not None:\n dist_logger(\n f\"Eval dataset {self.eval_dataset.__class__.__name__} was built. 
Size: {len(self.eval_dataset)}\"\n )\n else:\n dist_logger(\"Eval dataset is None\")\n self.after_eval_dataset_build()\n\n if self.tokenizer is None:\n self.before_tokenizer_build()\n self.tokenizer = self.build_tokenizer()\n dist_logger(f\"Tokenizer {self.config.correct_tokenizer_name_or_path} was built\")\n self.after_tokenizer_build()\n\n if self.collator is None:\n self.before_collator_build()\n self.collator = self.build_collator()\n dist_logger(f\"Collator {self.collator.__class__.__name__} was built\")\n self.after_collator_build()\n\n if self.quantization_config is None:\n self.before_quantization_config_build()\n self.quantization_config = self.build_quantization_config()\n if self.quantization_config is not None:\n dist_logger(f\"Quantization config was built:\\n{self.quantization_config.to_json_string()}\")\n else:\n dist_logger(f\"Quantization config is None. Model will be loaded using {self.config.dtype}\")\n self.after_quantization_config_build()\n\n if self.model is None:\n self.before_model_build()\n self.model = self.build_model()\n dist_logger(f\"Model {self.config.model_name_or_path} was built\")\n self.after_model_build()\n elif self.quantization_config is not None:\n dist_logger.warning(\"quantization_config is not None, but the model was built outside of the experiment\")\n\n if self.is_bnb_quantization and self.config.bnb_quantize_after_model_init:\n self.before_bnb_quantization()\n self.bnb_quantization()\n bnb_quantization_type = \"int4\" if self.quantization_config.load_in_4bit else \"int8\"\n dist_logger(f\"Bnb quantization applyed. Type: {bnb_quantization_type}\")\n self.after_bnb_quantization()\n\n if self.config.apply_lora:\n self.before_lora_apply()\n self.lora_config = self.apply_lora()\n dist_logger(f\"LoRA applied to the model {self.config.model_name_or_path}\")\n self.after_lora_apply()\n\n if self.config.stabilize:\n self.before_stabilize_training()\n self.stabilize_training()\n dist_logger(f\"Model {self.config.model_name_or_path} is stabilized for training\")\n self.after_stabilize_training()\n\n if self.trainer is None:\n self.before_trainer_build()\n self.trainer = self.build_trainer()\n dist_logger(f\"Trainer {self.trainer.__class__.__name__} was built\")\n self.after_trainer_build()\n else:\n trainer_components = self.trainer_components\n trainer_components_is_not_none = [key for key, value in trainer_components.items() if value is not None]\n dist_logger.warning(\n \"Trainer was built outside of the experiment, \"\n f\"but this components is not None: {', '.join(trainer_components_is_not_none)}.\"\n )\n\n self.internal_checks()\n\n dist_logger(\"Experiment built successfully\")\n\n @property\n def is_bnb_quantization(self) -> bool:\n return isinstance(self.quantization_config, BitsAndBytesConfig) and self.config.from_gptq\n\n @property\n def trainer_components(self) -> Dict[str, Any]:\n components = {\n \"training_arguments\": self.training_arguments,\n \"train_dataset\": self.train_dataset,\n \"eval_dataset\": self.eval_dataset,\n \"collator\": self.collator,\n \"model\": self.model,\n }\n return components\n\n # checks\n def before_checks(self) -> None:\n return None\n\n def checks(self) -> None:\n if not torch.cuda.is_available():\n dist_logger.warning(\"CUDA is not available\")\n\n self.config.check()\n\n return None\n\n def after_checks(self) -> None:\n return None\n\n # training arguments\n def before_training_arguments_build(self) -> None:\n return None\n\n def build_training_arguments(self) -> TrainingArguments:\n training_arguments = 
build_training_arguments(config=self.config)\n return training_arguments\n\n def after_training_arguments_build(self) -> None:\n return None\n\n # train_dataset\n def before_train_dataset_build(self) -> None:\n return None\n\n def build_train_dataset_additional_kwargs(self) -> Dict[str, Any]:\n return dict()\n\n def build_train_dataset(self) -> BaseDataset:\n train_dataset_additional_kwargs = self.build_train_dataset_additional_kwargs()\n dataset = build_dataset(config=self.config, is_train=True, **train_dataset_additional_kwargs)\n if dataset is None:\n raise ValueError(\"Train dataset can't be loaded\")\n return dataset\n\n def after_train_dataset_build(self) -> None:\n return None\n\n # eval_dataset\n def before_eval_dataset_build(self) -> None:\n return None\n\n def build_eval_dataset_additional_kwargs(self) -> Dict[str, Any]:\n return self.build_train_dataset_additional_kwargs()\n\n def build_eval_dataset(self) -> Optional[BaseDataset]:\n eval_dataset_additional_kwargs = self.build_eval_dataset_additional_kwargs()\n dataset = build_dataset(config=self.config, is_train=False, **eval_dataset_additional_kwargs)\n return dataset\n\n def after_eval_dataset_build(self) -> None:\n return None\n\n # tokenizer\n def before_tokenizer_build(self) -> None:\n return None\n\n def build_tokenizer(self) -> PreTrainedTokenizer:\n tokenizer = build_tokenizer(config=self.config, use_fast=self.config.tokenizer_use_fast)\n return tokenizer\n\n def after_tokenizer_build(self) -> None:\n return None\n\n # collator\n def before_collator_build(self) -> None:\n return None\n\n def build_collator_additional_kwargs(self) -> Dict[str, Any]:\n return dict()\n\n def build_collator(self) -> BaseCollator:\n collator_additional_kwargs = self.build_collator_additional_kwargs()\n collator = build_collator(config=self.config, tokenizer=self.tokenizer, **collator_additional_kwargs)\n return collator\n\n def after_collator_build(self) -> None:\n return None\n\n # quantization_config\n def before_quantization_config_build(self) -> None:\n return None\n\n def build_quantization_config(self) -> Union[BitsAndBytesConfig, GPTQConfig, None]:\n quantization_config = build_quantization_config(config=self.config)\n return quantization_config\n\n def after_quantization_config_build(self) -> None:\n return None\n\n # model\n def before_model_build(self) -> None:\n return None\n\n def build_model(self) -> PreTrainedModel:\n quantization_config = (\n None if self.is_bnb_quantization and self.config.bnb_quantize_after_model_init else self.quantization_config\n )\n model = build_model(config=self.config, quantization_config=quantization_config)\n return model\n\n def after_model_build(self) -> None:\n return None\n\n # bnb_quantization\n def before_bnb_quantization(self) -> None:\n return None\n\n def bnb_quantization(self) -> None:\n self.model = replace_with_bnb_linear(\n model=self.model,\n quantization_config=self.quantization_config,\n )\n self.model.is_loaded_in_4bit = self.config.load_in_4bit\n self.model.is_loaded_in_8bit = self.config.load_in_8bit\n if self.config.need_to_prepare_model_for_kbit_training:\n self.model = prepare_model_for_kbit_training(\n model=self.model, use_gradient_checkpointing=self.config.use_gradient_checkpointing\n )\n\n def after_bnb_quantization(self) -> None:\n return None\n\n # lora\n def before_lora_apply(self) -> None:\n return None\n\n def apply_lora(self) -> LoraConfig:\n self.model, lora_config = apply_lora(config=self.config, model=self.model, lora_config=self.lora_config)\n return 
lora_config\n\n def after_lora_apply(self) -> None:\n return None\n\n # stabilize_training\n def before_stabilize_training(self) -> None:\n return None\n\n def stabilize_training(self) -> None:\n self.model = stabilize_training(model=self.model, norm_fp32=self.config.norm_fp32)\n\n def after_stabilize_training(self) -> None:\n return None\n\n # trainer\n def before_trainer_build(self) -> None:\n return None\n\n def build_trainer_additional_kwargs(self) -> Dict[str, Any]:\n return dict()\n\n def build_trainer(self) -> LMTrainer:\n additional_trainer_kwargs = self.build_trainer_additional_kwargs()\n\n if self.tokenizer is None:\n raise ValueError(\"tokenizer is None\")\n\n if self.train_dataset is None:\n raise ValueError(\"train_dataset is None\")\n\n if self.collator is None:\n raise ValueError(\"collator is None\")\n\n trainer = build_trainer(\n config=self.config,\n pad_token_id=self.tokenizer.pad_token_id,\n training_arguments=self.training_arguments,\n model=self.model,\n train_dataset=self.train_dataset,\n collator=self.collator,\n eval_dataset=self.eval_dataset,\n **additional_trainer_kwargs,\n )\n\n return trainer\n\n def after_trainer_build(self) -> None:\n return None\n\n def save_config(self) -> None:\n json_config = json.dumps(self.config.__dict__, indent=2)\n dist_logger(f\"Config:\\n{json_config}\")\n\n if is_distributed_training():\n if distributed.get_rank() == self.config.local_rank:\n os.makedirs(self.config.output_dir, exist_ok=True)\n with open(os.path.join(self.config.output_dir, \"training_config.json\"), \"w\") as file_object:\n file_object.write(json_config)\n else:\n os.makedirs(self.config.output_dir, exist_ok=True)\n with open(os.path.join(self.config.output_dir, \"training_config.json\"), \"w\") as file_object:\n file_object.write(json_config)\n\n return None\n\n def before_train(self) -> None:\n return None\n\n def run(self):\n \"\"\"\n Executes the training process for the experiment. Before calling this method, the build method must be called\n\n Before beginning, this method runs a series of internal checks to validate that the experiment is set up\n correctly and all necessary components are in place. 
This includes verifying that the trainer and\n training arguments are not `None` and checking if evaluation is requested, ensuring that the evaluation dataset\n is available.\n\n The method then performs the following steps:\n\n - Calls the `before_train` method, which serves as a hook for any pre-training procedures or custom actions\n to be performed just before training starts.\n\n - Starts the training process by calling the `train` method on the trainer object.\n\n - Logs the completion of training and proceeds to any post-training steps.\n\n - If the `fuse_after_training` flag is set in the configuration, LoRA layers, if used, are integrated into\n the main model parameters.\n\n - Handles the post-training tasks such as model saving and optionally pushing the trained model\n to the Hugging Face Hub.\n\n - Calls the `after_train` method, a hook for post-training actions that need to be executed after\n the entire training process has completed.\n\n - Lastly, it performs any actions required at the end of the experiment via the `at_end` method.\n\n If the process was successful, the model will have updated weights that reflect the training it underwent,\n and all artifacts such as logs, model checkpoints, and final model files will be saved at their respective\n locations as defined in the training arguments.\n\n Note: This method assumes that all the necessary components are already built and the experiment is ready\n to run. If this is not the case, an appropriate `ValueError` will be raised indicating which required component\n is missing.\n \"\"\"\n\n self.before_train()\n\n if self.trainer is None:\n raise ValueError(\"trainer is None\")\n\n if self.training_arguments is None:\n raise ValueError(\"training_arguments is None\")\n\n self.internal_checks_before_train()\n dist_logger(\"Training will start soon\")\n self.trainer.train()\n dist_logger(\"Training end\")\n\n self.after_train()\n\n if self.config.fuse_after_training:\n self.fuse_lora()\n\n if is_distributed_training():\n if distributed.get_rank() == self.config.local_rank:\n post_training(config=self.config, tokenizer=self.tokenizer)\n else:\n post_training(config=self.config, tokenizer=self.tokenizer)\n\n dist_logger(f\"Model saved to {self.training_arguments.output_dir}\")\n\n self.at_end()\n\n def push_to_hub(\n self,\n repo_id: Optional[str] = None,\n private: Optional[bool] = None,\n safe_serialization: Optional[bool] = None,\n need_push_to_hub_bos_add_bos_token: Optional[bool] = None,\n ) -> None:\n \"\"\"\n Pushes the trained model and its tokenizer to the Hugging Face Hub.\n\n This method helps you to upload the final trained model and its tokenizer directly to the Hugging Face\n Hub, making it easily accessible for sharing and deploying.\n\n Args:\n repo_id (`Optional[str]`, defaults to `None`):\n The repository name for the model on the Hugging Face Hub. If `None`, it defaults to using the\n `hub_model_id` from the configuration.\n private (`Optional[bool]`, defaults to `None`):\n A boolean flag to set the repository as private or public. If `None`, it uses the `hub_private_repo`\n setting from the configuration.\n safe_serialization (`Optional[bool]`, defaults to `None`):\n A boolean flag to enable safe serialization of model weights. If `None`, it uses the `save_safetensors`\n setting from the configuration.\n need_push_to_hub_bos_add_bos_token (`Optional[bool]`, defaults to `None`):\n A boolean flag to indicate if there is a need to handle the special case for BOS tokens when the model\n uses `bos_token`. 
If `None`, it uses the `push_to_hub_bos_add_bos_token` setting from the configuration.\n\n This method checks for proper initialization of the repository ID (`repo_id`) and raises a `ValueError` if it's\n not provided and not specified in the configuration. It then proceeds to push the tokenizer and model\n to the Hugging Face Hub using the provided parameters or defaults from the configuration.\n\n The model is uploaded with the specified serialization method to ensure compatibility and potential sharding\n for very large models (`max_shard_size` setting from the configuration). The tokenizer is also uploaded,\n and if needed, an additional procedure is invoked to handle special cases for BOS tokens.\n\n Note: If the method encounters a `None` value for the tokenizer when it attempts to push it to the hub,\n a warning is logged, and no action is taken for the tokenizer.\n\n By the end of this method, the artifacts (model and tokenizer) are available on the Hugging Face Hub at the\n specified repository, accessible according to the privacy settings.\n \"\"\"\n\n repo_id = repo_id or self.config.hub_model_id\n\n private = private if private is not None else self.config.hub_private_repo\n safe_serialization = safe_serialization if safe_serialization is not None else self.config.save_safetensors\n need_push_to_hub_bos_add_bos_token = (\n need_push_to_hub_bos_add_bos_token\n if need_push_to_hub_bos_add_bos_token is not None\n else self.config.push_to_hub_bos_add_bos_token\n )\n\n if repo_id is None:\n raise ValueError(\"repo_id and hub_model_id is None, but you want to push to HF hub\")\n\n if self.tokenizer is None:\n dist_logger.warning(\"Tokenizer is None. Can't push to the hub\")\n else:\n self.tokenizer.push_to_hub(repo_id=repo_id, private=private)\n sleep(10.0)\n if need_push_to_hub_bos_add_bos_token:\n push_to_hub_bos_add_bos_token(repo_id=repo_id)\n\n if self.model is None:\n raise ValueError(\"Model is None. Can't push to the hub\")\n else:\n self.model.push_to_hub(\n repo_id=repo_id,\n private=private,\n safe_serialization=safe_serialization,\n max_shard_size=self.config.max_shard_size,\n )\n\n def fuse_lora(self) -> PreTrainedModel:\n \"\"\"\n Integrates Low-Rank Adaptation (LoRA) parameters into the main model parameters, effectively 'fusing' them.\n\n This method is called after training if the `fuse_after_training` flag is set in the configuration.\n Fusing LoRA parameters is a process of merging LoRA's low-rank matrices into the main model's\n weight matrices, which reduces the number of parameters and can potentially streamline deployment for inference.\n\n The method performs the following steps:\n\n - Checks whether LoRA was applied during the setup of the experiment. If LoRA was not used or the\n `apply_lora` flag in the configuration is set to `False`, a warning is logged, and no fusing is performed.\n\n - If the model is an instance of `PeftModel` (a model class that supports parameter-efficient\n fine-tuning techniques like LoRA), it proceeds to merge LoRA parameters with the main model parameter\n matrices. 
If the model is not of type `PeftModel`, a `TypeError` is raised.\n\n - Logs the completion of the LoRA fusion process.\n\n - Executes any custom operations or cleanup needed after the fusion process through the `after_fuse` method,\n which serves as a hook.\n\n Upon successful completion of this method, the model's parameters will be updated to reflect the incorporation\n of LoRA adjustments, and the model will be ready for further actions such as evaluation, saving, or deployment.\n\n Returns:\n `PreTrainedModel`: The updated model with LoRA parameters fused into the main model weights.\n \"\"\"\n\n if not self.config.apply_lora:\n dist_logger.warning(\"Apply LoRA set to False at config\")\n\n if isinstance(self.model, PeftModel):\n self.model = self.model.merge_and_unload()\n else:\n raise TypeError(f\"Can't fuse model, this the model is not the PeftModel. Model type: {type(self.model)}\")\n\n dist_logger(\"LoRA fused\")\n\n self.after_fuse()\n\n return self.model\n\n def after_fuse(self) -> None:\n return None\n\n def after_train(self) -> None:\n return None\n\n def at_beginning(self) -> None:\n return None\n\n def at_end(self) -> None:\n return None" }, { "identifier": "LLAMA_TOKENIZER_DIR", "path": "tests/helpers/constants.py", "snippet": "LLAMA_TOKENIZER_DIR: str = os.path.join(TOKENIZERS_DIR, \"llama/\")" }, { "identifier": "patch_from_pretrained_auto_causal_lm", "path": "tests/helpers/patches.py", "snippet": "@contextmanager\ndef patch_from_pretrained_auto_causal_lm(monkeypatch: MonkeyPatch) -> Any:\n def from_pretrained(\n pretrained_model_name_or_path: str,\n quantization_config: Union[BitsAndBytesConfig, GPTQConfig, None] = None,\n torch_dtype: dtype = torch.float16,\n trust_remote_code: bool = True,\n device_map: Union[str, Dict[str, Any], None] = None,\n use_cache: bool = False,\n use_flash_attention_2: bool = True,\n ) -> LlamaForCausalLM:\n config = LlamaConfig(\n vocab_size=32_000,\n hidden_size=8,\n intermediate_size=32,\n num_hidden_layers=2,\n num_attention_heads=2,\n max_position_embeddings=32,\n )\n model = LlamaForCausalLM(config=config)\n return model\n\n monkeypatch.setattr(AutoModelForCausalLM, \"from_pretrained\", from_pretrained)\n yield True\n monkeypatch.undo()" }, { "identifier": "patch_trainer_train", "path": "tests/helpers/patches.py", "snippet": "@contextmanager\ndef patch_trainer_train(monkeypatch: MonkeyPatch) -> Any:\n def train(*args, **kwargs):\n return None\n\n monkeypatch.setattr(LMTrainer, \"train\", train)\n yield True\n monkeypatch.undo()" } ]
import os

from pytest import MonkeyPatch

from src.xllm.core.config import Config
from src.xllm.experiments.base import Experiment
from tests.helpers.constants import LLAMA_TOKENIZER_DIR
from tests.helpers.patches import patch_from_pretrained_auto_causal_lm, patch_trainer_train
18218
# Copyright 2023 Boris Zubarev. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def test_base_experiment_init(monkeypatch: MonkeyPatch, path_to_train_dummy_data: str):
    config = Config(
        push_to_hub=False,
        deepspeed_stage="0",
        train_local_path_to_data=path_to_train_dummy_data,
        tokenizer_name_or_path=LLAMA_TOKENIZER_DIR,
    )
    with patch_from_pretrained_auto_causal_lm(monkeypatch=monkeypatch):
# Copyright 2023 Boris Zubarev. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def test_base_experiment_init(monkeypatch: MonkeyPatch, path_to_train_dummy_data: str):
    config = Config(
        push_to_hub=False,
        deepspeed_stage="0",
        train_local_path_to_data=path_to_train_dummy_data,
        tokenizer_name_or_path=LLAMA_TOKENIZER_DIR,
    )
    with patch_from_pretrained_auto_causal_lm(monkeypatch=monkeypatch):
Experiment(config=config)
1
2023-11-10 17:55:03+00:00
24k
AMAAI-Lab/mustango
audioldm/pipeline.py
[ { "identifier": "LatentDiffusion", "path": "audioldm/ldm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(\n self,\n device=\"cuda\",\n first_stage_config=None,\n cond_stage_config=None,\n num_timesteps_cond=None,\n cond_stage_key=\"image\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n base_learning_rate=None,\n *args,\n **kwargs,\n ):\n self.device = device\n self.learning_rate = base_learning_rate\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs[\"timesteps\"]\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = \"concat\" if concat_mode else \"crossattn\"\n if cond_stage_config == \"__is_unconditional__\":\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.cond_stage_key = cond_stage_key\n self.cond_stage_key_orig = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = scale_factor\n else:\n self.register_buffer(\"scale_factor\", torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n self.clip_denoised = False\n\n def make_cond_schedule(\n self,\n ):\n self.cond_ids = torch.full(\n size=(self.num_timesteps,),\n fill_value=self.num_timesteps - 1,\n dtype=torch.long,\n )\n ids = torch.round(\n torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)\n ).long()\n self.cond_ids[: self.num_timesteps_cond] = ids\n\n def register_schedule(\n self,\n given_betas=None,\n beta_schedule=\"linear\",\n timesteps=1000,\n linear_start=1e-4,\n linear_end=2e-2,\n cosine_s=8e-3,\n ):\n super().register_schedule(\n given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s\n )\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != \"__is_first_stage__\"\n assert config != \"__is_unconditional__\"\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n self.cond_stage_model = self.cond_stage_model.to(self.device)\n\n def get_first_stage_encoding(self, 
encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(\n f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\"\n )\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, \"encode\") and callable(\n self.cond_stage_model.encode\n ):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n # Text input is list\n if type(c) == list and len(c) == 1:\n c = self.cond_stage_model([c[0], c[0]])\n c = c[0:1]\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n @torch.no_grad()\n def get_input(\n self,\n batch,\n k,\n return_first_stage_encode=True,\n return_first_stage_outputs=False,\n force_c_encode=False,\n cond_key=None,\n return_original_cond=False,\n bs=None,\n ):\n x = super().get_input(batch, k)\n\n if bs is not None:\n x = x[:bs]\n\n x = x.to(self.device)\n\n if return_first_stage_encode:\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n else:\n z = None\n\n if self.model.conditioning_key is not None:\n if cond_key is None:\n cond_key = self.cond_stage_key\n if cond_key != self.first_stage_key:\n if cond_key in [\"caption\", \"coordinates_bbox\"]:\n xc = batch[cond_key]\n elif cond_key == \"class_label\":\n xc = batch\n else:\n # [bs, 1, 527]\n xc = super().get_input(batch, cond_key)\n if type(xc) == torch.Tensor:\n xc = xc.to(self.device)\n else:\n xc = x\n if not self.cond_stage_trainable or force_c_encode:\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n c = self.get_learned_conditioning(xc.to(self.device))\n else:\n c = xc\n\n if bs is not None:\n c = c[:bs]\n\n else:\n c = None\n xc = None\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n c = {\"pos_x\": pos_x, \"pos_y\": pos_y}\n out = [z, c]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_original_cond:\n out.append(xc)\n return out\n\n @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, \"b h w c -> b c h w\").contiguous()\n\n z = 1.0 / self.scale_factor * z\n return self.first_stage_model.decode(z)\n\n def mel_spectrogram_to_waveform(self, mel):\n # Mel: [bs, 1, t-steps, fbins]\n if len(mel.size()) == 4:\n mel = mel.squeeze(1)\n mel = mel.permute(0, 2, 1)\n waveform = self.first_stage_model.vocoder(mel)\n waveform = waveform.cpu().detach().numpy()\n return waveform\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n return self.first_stage_model.encode(x)\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n\n if isinstance(cond, dict):\n # hybrid case, cond is exptected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n if self.model.conditioning_key == \"concat\":\n key = \"c_concat\"\n elif self.model.conditioning_key == \"crossattn\":\n key = \"c_crossattn\"\n else:\n key = 
\"c_film\"\n\n cond = {key: cond}\n\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def p_mean_variance(\n self,\n x,\n c,\n t,\n clip_denoised: bool,\n return_codebook_ids=False,\n quantize_denoised=False,\n return_x0=False,\n score_corrector=None,\n corrector_kwargs=None,\n ):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(\n self, model_out, x, t, c, **corrector_kwargs\n )\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1.0, 1.0)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(\n x_start=x_recon, x_t=x, t=t\n )\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(\n self,\n x,\n c,\n t,\n clip_denoised=False,\n repeat_noise=False,\n return_codebook_ids=False,\n quantize_denoised=False,\n return_x0=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n ):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(\n x=x,\n c=c,\n t=t,\n clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n )\n if return_codebook_ids:\n raise DeprecationWarning(\"Support dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (\n (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))).contiguous()\n )\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (\n 0.5 * model_log_variance\n ).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return (\n model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise,\n x0,\n )\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(\n self,\n cond,\n shape,\n verbose=True,\n callback=None,\n quantize_denoised=False,\n img_callback=None,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n batch_size=None,\n x_T=None,\n start_T=None,\n log_every_t=None,\n ):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates 
= []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {\n key: cond[key][:batch_size]\n if not isinstance(cond[key], list)\n else list(map(lambda x: x[:batch_size], cond[key]))\n for key in cond\n }\n else:\n cond = (\n [c[:batch_size] for c in cond]\n if isinstance(cond, list)\n else cond[:batch_size]\n )\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = (\n tqdm(\n reversed(range(0, timesteps)),\n desc=\"Progressive Generation\",\n total=timesteps,\n )\n if verbose\n else reversed(range(0, timesteps))\n )\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != \"hybrid\"\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(\n img,\n cond,\n ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised,\n return_x0=True,\n temperature=temperature[i],\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n )\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1.0 - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback:\n callback(i)\n if img_callback:\n img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(\n self,\n cond,\n shape,\n return_intermediates=False,\n x_T=None,\n verbose=True,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n start_T=None,\n log_every_t=None,\n ):\n\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = (\n tqdm(reversed(range(0, timesteps)), desc=\"Sampling t\", total=timesteps)\n if verbose\n else reversed(range(0, timesteps))\n )\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != \"hybrid\"\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(\n img,\n cond,\n ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised,\n )\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1.0 - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback:\n callback(i)\n if img_callback:\n img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(\n self,\n cond,\n batch_size=16,\n return_intermediates=False,\n x_T=None,\n verbose=True,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n shape=None,\n **kwargs,\n ):\n if shape is None:\n shape = (batch_size, self.channels, self.latent_t_size, self.latent_f_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {\n key: cond[key][:batch_size]\n if not 
isinstance(cond[key], list)\n else list(map(lambda x: x[:batch_size], cond[key]))\n for key in cond\n }\n else:\n cond = (\n [c[:batch_size] for c in cond]\n if isinstance(cond, list)\n else cond[:batch_size]\n )\n return self.p_sample_loop(\n cond,\n shape,\n return_intermediates=return_intermediates,\n x_T=x_T,\n verbose=verbose,\n timesteps=timesteps,\n quantize_denoised=quantize_denoised,\n mask=mask,\n x0=x0,\n **kwargs,\n )\n\n @torch.no_grad()\n def sample_log(\n self,\n cond,\n batch_size,\n ddim,\n ddim_steps,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_plms=False,\n mask=None,\n **kwargs,\n ):\n\n if mask is not None:\n shape = (self.channels, mask.size()[-2], mask.size()[-1])\n else:\n shape = (self.channels, self.latent_t_size, self.latent_f_size)\n\n intermediate = None\n if ddim and not use_plms:\n # print(\"Use ddim sampler\")\n\n ddim_sampler = DDIMSampler(self)\n samples, intermediates = ddim_sampler.sample(\n ddim_steps,\n batch_size,\n shape,\n cond,\n verbose=False,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n mask=mask,\n **kwargs,\n )\n\n else:\n # print(\"Use DDPM sampler\")\n samples, intermediates = self.sample(\n cond=cond,\n batch_size=batch_size,\n return_intermediates=True,\n unconditional_guidance_scale=unconditional_guidance_scale,\n mask=mask,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs,\n )\n\n return samples, intermediate\n\n @torch.no_grad()\n def generate_sample(\n self,\n batchs,\n ddim_steps=200,\n ddim_eta=1.0,\n x_T=None,\n n_candidate_gen_per_text=1,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n name=\"waveform\",\n use_plms=False,\n save=False,\n **kwargs,\n ):\n # Generate n_candidate_gen_per_text times and select the best\n # Batch: audio, text, fnames\n assert x_T is None\n try:\n batchs = iter(batchs)\n except TypeError:\n raise ValueError(\"The first input argument should be an iterable object\")\n\n if use_plms:\n assert ddim_steps is not None\n use_ddim = ddim_steps is not None\n # waveform_save_path = os.path.join(self.get_log_dir(), name)\n # os.makedirs(waveform_save_path, exist_ok=True)\n # print(\"Waveform save path: \", waveform_save_path)\n\n with self.ema_scope(\"Generate\"):\n for batch in batchs:\n z, c = self.get_input(\n batch,\n self.first_stage_key,\n cond_key=self.cond_stage_key,\n return_first_stage_outputs=False,\n force_c_encode=True,\n return_original_cond=False,\n bs=None,\n )\n text = super().get_input(batch, \"text\")\n\n # Generate multiple samples\n batch_size = z.shape[0] * n_candidate_gen_per_text\n c = torch.cat([c] * n_candidate_gen_per_text, dim=0)\n text = text * n_candidate_gen_per_text\n\n if unconditional_guidance_scale != 1.0:\n unconditional_conditioning = (\n self.cond_stage_model.get_unconditional_condition(batch_size)\n )\n\n samples, _ = self.sample_log(\n cond=c,\n batch_size=batch_size,\n x_T=x_T,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n use_plms=use_plms,\n )\n \n if(torch.max(torch.abs(samples)) > 1e2):\n samples = torch.clip(samples, min=-10, max=10)\n \n mel = self.decode_first_stage(samples)\n\n waveform = self.mel_spectrogram_to_waveform(mel)\n\n if waveform.shape[0] > 1:\n similarity = self.cond_stage_model.cos_similarity(\n torch.FloatTensor(waveform).squeeze(1), text\n )\n\n best_index = []\n for i in 
range(z.shape[0]):\n candidates = similarity[i :: z.shape[0]]\n max_index = torch.argmax(candidates).item()\n best_index.append(i + max_index * z.shape[0])\n\n waveform = waveform[best_index]\n # print(\"Similarity between generated audio and text\", similarity)\n # print(\"Choose the following indexes:\", best_index)\n\n return waveform\n\n @torch.no_grad()\n def generate_sample_masked(\n self,\n batchs,\n ddim_steps=200,\n ddim_eta=1.0,\n x_T=None,\n n_candidate_gen_per_text=1,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n name=\"waveform\",\n use_plms=False,\n time_mask_ratio_start_and_end=(0.25, 0.75),\n freq_mask_ratio_start_and_end=(0.75, 1.0),\n save=False,\n **kwargs,\n ):\n # Generate n_candidate_gen_per_text times and select the best\n # Batch: audio, text, fnames\n assert x_T is None\n try:\n batchs = iter(batchs)\n except TypeError:\n raise ValueError(\"The first input argument should be an iterable object\")\n\n if use_plms:\n assert ddim_steps is not None\n use_ddim = ddim_steps is not None\n # waveform_save_path = os.path.join(self.get_log_dir(), name)\n # os.makedirs(waveform_save_path, exist_ok=True)\n # print(\"Waveform save path: \", waveform_save_path)\n\n with self.ema_scope(\"Generate\"):\n for batch in batchs:\n z, c = self.get_input(\n batch,\n self.first_stage_key,\n cond_key=self.cond_stage_key,\n return_first_stage_outputs=False,\n force_c_encode=True,\n return_original_cond=False,\n bs=None,\n )\n text = super().get_input(batch, \"text\")\n \n # Generate multiple samples\n batch_size = z.shape[0] * n_candidate_gen_per_text\n \n _, h, w = z.shape[0], z.shape[2], z.shape[3]\n \n mask = torch.ones(batch_size, h, w).to(self.device)\n \n mask[:, int(h * time_mask_ratio_start_and_end[0]) : int(h * time_mask_ratio_start_and_end[1]), :] = 0 \n mask[:, :, int(w * freq_mask_ratio_start_and_end[0]) : int(w * freq_mask_ratio_start_and_end[1])] = 0 \n mask = mask[:, None, ...]\n \n c = torch.cat([c] * n_candidate_gen_per_text, dim=0)\n text = text * n_candidate_gen_per_text\n\n if unconditional_guidance_scale != 1.0:\n unconditional_conditioning = (\n self.cond_stage_model.get_unconditional_condition(batch_size)\n )\n\n samples, _ = self.sample_log(\n cond=c,\n batch_size=batch_size,\n x_T=x_T,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n use_plms=use_plms, mask=mask, x0=torch.cat([z] * n_candidate_gen_per_text)\n )\n\n mel = self.decode_first_stage(samples)\n\n waveform = self.mel_spectrogram_to_waveform(mel)\n\n if waveform.shape[0] > 1:\n similarity = self.cond_stage_model.cos_similarity(\n torch.FloatTensor(waveform).squeeze(1), text\n )\n\n best_index = []\n for i in range(z.shape[0]):\n candidates = similarity[i :: z.shape[0]]\n max_index = torch.argmax(candidates).item()\n best_index.append(i + max_index * z.shape[0])\n\n waveform = waveform[best_index]\n # print(\"Similarity between generated audio and text\", similarity)\n # print(\"Choose the following indexes:\", best_index)\n\n return waveform" }, { "identifier": "seed_everything", "path": "audioldm/utils.py", "snippet": "def seed_everything(seed):\n import random, os\n import numpy as np\n import torch\n\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True" }, { "identifier": 
"default_audioldm_config", "path": "audioldm/utils.py", "snippet": "def default_audioldm_config(model_name=\"audioldm-s-full\"): \n basic_config = {\n \"wave_file_save_path\": \"./output\",\n \"id\": {\n \"version\": \"v1\",\n \"name\": \"default\",\n \"root\": \"/mnt/fast/nobackup/users/hl01486/projects/general_audio_generation/AudioLDM-python/config/default/latent_diffusion.yaml\",\n },\n \"preprocessing\": {\n \"audio\": {\"sampling_rate\": 16000, \"max_wav_value\": 32768},\n \"stft\": {\"filter_length\": 1024, \"hop_length\": 160, \"win_length\": 1024},\n \"mel\": {\n \"n_mel_channels\": 64,\n \"mel_fmin\": 0,\n \"mel_fmax\": 8000,\n \"freqm\": 0,\n \"timem\": 0,\n \"blur\": False,\n \"mean\": -4.63,\n \"std\": 2.74,\n \"target_length\": 1024,\n },\n },\n \"model\": {\n \"device\": \"cuda\",\n \"target\": \"audioldm.pipline.LatentDiffusion\",\n \"params\": {\n \"base_learning_rate\": 5e-06,\n \"linear_start\": 0.0015,\n \"linear_end\": 0.0195,\n \"num_timesteps_cond\": 1,\n \"log_every_t\": 200,\n \"timesteps\": 1000,\n \"first_stage_key\": \"fbank\",\n \"cond_stage_key\": \"waveform\",\n \"latent_t_size\": 256,\n \"latent_f_size\": 16,\n \"channels\": 8,\n \"cond_stage_trainable\": True,\n \"conditioning_key\": \"film\",\n \"monitor\": \"val/loss_simple_ema\",\n \"scale_by_std\": True,\n \"unet_config\": {\n \"target\": \"audioldm.latent_diffusion.openaimodel.UNetModel\",\n \"params\": {\n \"image_size\": 64,\n \"extra_film_condition_dim\": 512,\n \"extra_film_use_concat\": True,\n \"in_channels\": 8,\n \"out_channels\": 8,\n \"model_channels\": 128,\n \"attention_resolutions\": [8, 4, 2],\n \"num_res_blocks\": 2,\n \"channel_mult\": [1, 2, 3, 5],\n \"num_head_channels\": 32,\n \"use_spatial_transformer\": True,\n },\n },\n \"first_stage_config\": {\n \"base_learning_rate\": 4.5e-05,\n \"target\": \"audioldm.variational_autoencoder.autoencoder.AutoencoderKL\",\n \"params\": {\n \"monitor\": \"val/rec_loss\",\n \"image_key\": \"fbank\",\n \"subband\": 1,\n \"embed_dim\": 8,\n \"time_shuffle\": 1,\n \"ddconfig\": {\n \"double_z\": True,\n \"z_channels\": 8,\n \"resolution\": 256,\n \"downsample_time\": False,\n \"in_channels\": 1,\n \"out_ch\": 1,\n \"ch\": 128,\n \"ch_mult\": [1, 2, 4],\n \"num_res_blocks\": 2,\n \"attn_resolutions\": [],\n \"dropout\": 0.0,\n },\n },\n },\n \"cond_stage_config\": {\n \"target\": \"audioldm.clap.encoders.CLAPAudioEmbeddingClassifierFreev2\",\n \"params\": {\n \"key\": \"waveform\",\n \"sampling_rate\": 16000,\n \"embed_mode\": \"audio\",\n \"unconditional_prob\": 0.1,\n },\n },\n },\n },\n }\n \n if(\"-l-\" in model_name):\n basic_config[\"model\"][\"params\"][\"unet_config\"][\"params\"][\"model_channels\"] = 256\n basic_config[\"model\"][\"params\"][\"unet_config\"][\"params\"][\"num_head_channels\"] = 64\n elif(\"-m-\" in model_name):\n basic_config[\"model\"][\"params\"][\"unet_config\"][\"params\"][\"model_channels\"] = 192\n basic_config[\"model\"][\"params\"][\"cond_stage_config\"][\"params\"][\"amodel\"] = \"HTSAT-base\" # This model use a larger HTAST\n \n return basic_config" }, { "identifier": "get_duration", "path": "audioldm/utils.py", "snippet": "def get_duration(fname):\n with contextlib.closing(wave.open(fname, 'r')) as f:\n frames = f.getnframes()\n rate = f.getframerate()\n return frames / float(rate)" }, { "identifier": "get_bit_depth", "path": "audioldm/utils.py", "snippet": "def get_bit_depth(fname):\n with contextlib.closing(wave.open(fname, 'r')) as f:\n bit_depth = f.getsampwidth() * 8\n return bit_depth" }, { "identifier": 
"get_metadata", "path": "audioldm/utils.py", "snippet": "def get_metadata():\n return {\n \"audioldm-s-full\": {\n \"path\": os.path.join(\n CACHE_DIR,\n \"audioldm-s-full.ckpt\",\n ),\n \"url\": \"https://zenodo.org/record/7600541/files/audioldm-s-full?download=1\",\n },\n \"audioldm-l-full\": {\n \"path\": os.path.join(\n CACHE_DIR,\n \"audioldm-l-full.ckpt\",\n ),\n \"url\": \"https://zenodo.org/record/7698295/files/audioldm-full-l.ckpt?download=1\",\n },\n \"audioldm-s-full-v2\": {\n \"path\": os.path.join(\n CACHE_DIR,\n \"audioldm-s-full-v2.ckpt\",\n ),\n \"url\": \"https://zenodo.org/record/7698295/files/audioldm-full-s-v2.ckpt?download=1\",\n },\n \"audioldm-m-text-ft\": {\n \"path\": os.path.join(\n CACHE_DIR,\n \"audioldm-m-text-ft.ckpt\",\n ),\n \"url\": \"https://zenodo.org/record/7813012/files/audioldm-m-text-ft.ckpt?download=1\",\n },\n \"audioldm-s-text-ft\": {\n \"path\": os.path.join(\n CACHE_DIR,\n \"audioldm-s-text-ft.ckpt\",\n ),\n \"url\": \"https://zenodo.org/record/7813012/files/audioldm-s-text-ft.ckpt?download=1\",\n },\n \"audioldm-m-full\": {\n \"path\": os.path.join(\n CACHE_DIR,\n \"audioldm-m-full.ckpt\",\n ),\n \"url\": \"https://zenodo.org/record/7813012/files/audioldm-m-full.ckpt?download=1\",\n },\n }" }, { "identifier": "download_checkpoint", "path": "audioldm/utils.py", "snippet": "def download_checkpoint(checkpoint_name=\"audioldm-s-full\"):\n meta = get_metadata()\n if(checkpoint_name not in meta.keys()):\n print(\"The model name you provided is not supported. Please use one of the following: \", meta.keys())\n\n if not os.path.exists(meta[checkpoint_name][\"path\"]) or os.path.getsize(meta[checkpoint_name][\"path\"]) < 2*10**9:\n os.makedirs(os.path.dirname(meta[checkpoint_name][\"path\"]), exist_ok=True)\n print(f\"Downloading the main structure of {checkpoint_name} into {os.path.dirname(meta[checkpoint_name]['path'])}\")\n\n urllib.request.urlretrieve(meta[checkpoint_name][\"url\"], meta[checkpoint_name][\"path\"], MyProgressBar())\n print(\n \"Weights downloaded in: {} Size: {}\".format(\n meta[checkpoint_name][\"path\"],\n os.path.getsize(meta[checkpoint_name][\"path\"]),\n )\n )" }, { "identifier": "wav_to_fbank", "path": "audioldm/audio/tools.py", "snippet": "def wav_to_fbank(filename, target_length=1024, fn_STFT=None):\n assert fn_STFT is not None\n\n # mixup\n waveform = read_wav_file(filename, target_length * 160) # hop size is 160\n\n waveform = waveform[0, ...]\n waveform = torch.FloatTensor(waveform)\n\n fbank, log_magnitudes_stft, energy = get_mel_from_wav(waveform, fn_STFT)\n\n fbank = torch.FloatTensor(fbank.T)\n log_magnitudes_stft = torch.FloatTensor(log_magnitudes_stft.T)\n\n fbank, log_magnitudes_stft = _pad_spec(fbank, target_length), _pad_spec(\n log_magnitudes_stft, target_length\n )\n\n return fbank, log_magnitudes_stft, waveform" }, { "identifier": "read_wav_file", "path": "audioldm/audio/tools.py", "snippet": "def read_wav_file(filename, segment_length):\n # waveform, sr = librosa.load(filename, sr=None, mono=True) # 4 times slower\n waveform, sr = torchaudio.load(filename) # Faster!!!\n waveform = torchaudio.functional.resample(waveform, orig_freq=sr, new_freq=16000)\n waveform = waveform.numpy()[0, ...]\n waveform = normalize_wav(waveform)\n waveform = waveform[None, ...]\n waveform = pad_wav(waveform, segment_length)\n \n waveform = waveform / np.max(np.abs(waveform))\n waveform = 0.5 * waveform\n \n return waveform" }, { "identifier": "TacotronSTFT", "path": "audioldm/audio/stft.py", "snippet": "class 
TacotronSTFT(torch.nn.Module):\n def __init__(\n self,\n filter_length,\n hop_length,\n win_length,\n n_mel_channels,\n sampling_rate,\n mel_fmin,\n mel_fmax,\n ):\n super(TacotronSTFT, self).__init__()\n self.n_mel_channels = n_mel_channels\n self.sampling_rate = sampling_rate\n self.stft_fn = STFT(filter_length, hop_length, win_length)\n mel_basis = librosa_mel_fn(\n sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax\n )\n mel_basis = torch.from_numpy(mel_basis).float()\n self.register_buffer(\"mel_basis\", mel_basis)\n\n def spectral_normalize(self, magnitudes, normalize_fun):\n output = dynamic_range_compression(magnitudes, normalize_fun)\n return output\n\n def spectral_de_normalize(self, magnitudes):\n output = dynamic_range_decompression(magnitudes)\n return output\n\n def mel_spectrogram(self, y, normalize_fun=torch.log):\n \"\"\"Computes mel-spectrograms from a batch of waves\n PARAMS\n ------\n y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]\n\n RETURNS\n -------\n mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)\n \"\"\"\n assert torch.min(y.data) >= -1, torch.min(y.data)\n assert torch.max(y.data) <= 1, torch.max(y.data)\n\n magnitudes, phases = self.stft_fn.transform(y)\n magnitudes = magnitudes.data\n mel_output = torch.matmul(self.mel_basis, magnitudes)\n mel_output = self.spectral_normalize(mel_output, normalize_fun)\n energy = torch.norm(magnitudes, dim=1)\n\n log_magnitudes = self.spectral_normalize(magnitudes, normalize_fun)\n\n return mel_output, log_magnitudes, energy" }, { "identifier": "DDIMSampler", "path": "audioldm/latent_diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n 
alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n # iterator = gr.Progress().tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n iterator = 
tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps, leave=False)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO deterministic forward pass?\n img = (\n img_orig * mask + (1.0 - mask) * img\n ) # In the first sampling step, img is pure gaussian noise\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n img, pred_x0 = outs\n if callback:\n callback(i)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n # iterator = gr.Progress().tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n # When 
unconditional_guidance_scale == 1: only e_t\n # When unconditional_guidance_scale == 0: only unconditional\n # When unconditional_guidance_scale > 1: add more unconditional guidance\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise # TODO\n return x_prev, pred_x0" } ]
import os
import argparse
import yaml
import torch
import os
from torch import autocast
from tqdm import tqdm, trange

from audioldm import LatentDiffusion, seed_everything
from audioldm.utils import default_audioldm_config, get_duration, get_bit_depth, get_metadata, download_checkpoint
from audioldm.audio import wav_to_fbank, TacotronSTFT, read_wav_file
from audioldm.latent_diffusion.ddim import DDIMSampler
from einops import repeat
14,566
    latent_diffusion = LatentDiffusion(**config["model"]["params"])

    resume_from_checkpoint = ckpt_path
    checkpoint = torch.load(resume_from_checkpoint, map_location=device)
    latent_diffusion.load_state_dict(checkpoint["state_dict"])

    latent_diffusion.eval()
    latent_diffusion = latent_diffusion.to(device)

    latent_diffusion.cond_stage_model.embed_mode = "text"
    return latent_diffusion


def duration_to_latent_t_size(duration):
    return int(duration * 25.6)


def set_cond_audio(latent_diffusion):
    latent_diffusion.cond_stage_key = "waveform"
    latent_diffusion.cond_stage_model.embed_mode = "audio"
    return latent_diffusion


def set_cond_text(latent_diffusion):
    latent_diffusion.cond_stage_key = "text"
    latent_diffusion.cond_stage_model.embed_mode = "text"
    return latent_diffusion


def text_to_audio(
    latent_diffusion,
    text,
    original_audio_file_path = None,
    seed=42,
    ddim_steps=200,
    duration=10,
    batchsize=1,
    guidance_scale=2.5,
    n_candidate_gen_per_text=3,
    config=None,
):
    seed_everything(int(seed))
    waveform = None
    if(original_audio_file_path is not None):
        waveform = read_wav_file(original_audio_file_path, int(duration * 102.4) * 160)

    batch = make_batch_for_text_to_audio(text, waveform=waveform, batchsize=batchsize)

    latent_diffusion.latent_t_size = duration_to_latent_t_size(duration)

    if(waveform is not None):
        print("Generate audio that has similar content as %s" % original_audio_file_path)
        latent_diffusion = set_cond_audio(latent_diffusion)
    else:
        print("Generate audio using text %s" % text)
        latent_diffusion = set_cond_text(latent_diffusion)

    with torch.no_grad():
        waveform = latent_diffusion.generate_sample(
            [batch],
            unconditional_guidance_scale=guidance_scale,
            ddim_steps=ddim_steps,
            n_candidate_gen_per_text=n_candidate_gen_per_text,
            duration=duration,
        )
    return waveform


def style_transfer(
    latent_diffusion,
    text,
    original_audio_file_path,
    transfer_strength,
    seed=42,
    duration=10,
    batchsize=1,
    guidance_scale=2.5,
    ddim_steps=200,
    config=None,
):
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")

    assert original_audio_file_path is not None, "You need to provide the original audio file path"
    audio_file_duration = get_duration(original_audio_file_path)

    assert get_bit_depth(original_audio_file_path) == 16, "The bit depth of the original audio file %s must be 16" % original_audio_file_path

    # if(duration > 20):
    #     print("Warning: The duration of the audio file %s must be less than 20 seconds. Longer duration will result in Nan in model output (we are still debugging that); Automatically set duration to 20 seconds")
    #     duration = 20

    if(duration >= audio_file_duration):
        print("Warning: Duration you specified %s-seconds must equal or smaller than the audio file duration %ss" % (duration, audio_file_duration))
        duration = round_up_duration(audio_file_duration)
        print("Set new duration as %s-seconds" % duration)

    # duration = round_up_duration(duration)

    latent_diffusion = set_cond_text(latent_diffusion)

    if config is not None:
        assert type(config) is str
        config = yaml.load(open(config, "r"), Loader=yaml.FullLoader)
    else:
        config = default_audioldm_config()

    seed_everything(int(seed))
    # latent_diffusion.latent_t_size = duration_to_latent_t_size(duration)
    latent_diffusion.cond_stage_model.embed_mode = "text"

    fn_STFT = TacotronSTFT(
        config["preprocessing"]["stft"]["filter_length"],
        config["preprocessing"]["stft"]["hop_length"],
        config["preprocessing"]["stft"]["win_length"],
        config["preprocessing"]["mel"]["n_mel_channels"],
        config["preprocessing"]["audio"]["sampling_rate"],
        config["preprocessing"]["mel"]["mel_fmin"],
        config["preprocessing"]["mel"]["mel_fmax"],
    )
def make_batch_for_text_to_audio(text, waveform=None, fbank=None, batchsize=1):
    text = [text] * batchsize
    if batchsize < 1:
        print("Warning: Batchsize must be at least 1. Batchsize is set to .")

    if(fbank is None):
        fbank = torch.zeros((batchsize, 1024, 64))  # Not used, here to keep the code format
    else:
        fbank = torch.FloatTensor(fbank)
        fbank = fbank.expand(batchsize, 1024, 64)
        assert fbank.size(0) == batchsize

    stft = torch.zeros((batchsize, 1024, 512))  # Not used

    if(waveform is None):
        waveform = torch.zeros((batchsize, 160000))  # Not used
    else:
        waveform = torch.FloatTensor(waveform)
        waveform = waveform.expand(batchsize, -1)
        assert waveform.size(0) == batchsize

    fname = [""] * batchsize  # Not used

    batch = (
        fbank,
        stft,
        None,
        fname,
        waveform,
        text,
    )
    return batch


def round_up_duration(duration):
    return int(round(duration/2.5) + 1) * 2.5


def build_model(
    ckpt_path=None,
    config=None,
    model_name="audioldm-s-full"
):
    print("Load AudioLDM: %s", model_name)

    if(ckpt_path is None):
        ckpt_path = get_metadata()[model_name]["path"]

    if(not os.path.exists(ckpt_path)):
        download_checkpoint(model_name)

    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")

    if config is not None:
        assert type(config) is str
        config = yaml.load(open(config, "r"), Loader=yaml.FullLoader)
    else:
        config = default_audioldm_config(model_name)

    # Use text as condition instead of using waveform during training
    config["model"]["params"]["device"] = device
    config["model"]["params"]["cond_stage_key"] = "text"

    # No normalization here
    latent_diffusion = LatentDiffusion(**config["model"]["params"])

    resume_from_checkpoint = ckpt_path
    checkpoint = torch.load(resume_from_checkpoint, map_location=device)
    latent_diffusion.load_state_dict(checkpoint["state_dict"])

    latent_diffusion.eval()
    latent_diffusion = latent_diffusion.to(device)

    latent_diffusion.cond_stage_model.embed_mode = "text"
    return latent_diffusion


def duration_to_latent_t_size(duration):
    return int(duration * 25.6)


def set_cond_audio(latent_diffusion):
    latent_diffusion.cond_stage_key = "waveform"
    latent_diffusion.cond_stage_model.embed_mode = "audio"
    return latent_diffusion


def set_cond_text(latent_diffusion):
    latent_diffusion.cond_stage_key = "text"
    latent_diffusion.cond_stage_model.embed_mode = "text"
    return latent_diffusion


def text_to_audio(
    latent_diffusion,
    text,
    original_audio_file_path = None,
    seed=42,
    ddim_steps=200,
    duration=10,
    batchsize=1,
    guidance_scale=2.5,
    n_candidate_gen_per_text=3,
    config=None,
):
    seed_everything(int(seed))
    waveform = None
    if(original_audio_file_path is not None):
        waveform = read_wav_file(original_audio_file_path, int(duration * 102.4) * 160)

    batch = make_batch_for_text_to_audio(text, waveform=waveform, batchsize=batchsize)

    latent_diffusion.latent_t_size = duration_to_latent_t_size(duration)

    if(waveform is not None):
        print("Generate audio that has similar content as %s" % original_audio_file_path)
        latent_diffusion = set_cond_audio(latent_diffusion)
    else:
        print("Generate audio using text %s" % text)
        latent_diffusion = set_cond_text(latent_diffusion)

    with torch.no_grad():
        waveform = latent_diffusion.generate_sample(
            [batch],
            unconditional_guidance_scale=guidance_scale,
            ddim_steps=ddim_steps,
            n_candidate_gen_per_text=n_candidate_gen_per_text,
            duration=duration,
        )
    return waveform


def style_transfer(
    latent_diffusion,
    text,
    original_audio_file_path,
    transfer_strength,
    seed=42,
    duration=10,
    batchsize=1,
    guidance_scale=2.5,
    ddim_steps=200,
    config=None,
):
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")

    assert original_audio_file_path is not None, "You need to provide the original audio file path"
    audio_file_duration = get_duration(original_audio_file_path)

    assert get_bit_depth(original_audio_file_path) == 16, "The bit depth of the original audio file %s must be 16" % original_audio_file_path

    # if(duration > 20):
    #     print("Warning: The duration of the audio file %s must be less than 20 seconds. Longer duration will result in Nan in model output (we are still debugging that); Automatically set duration to 20 seconds")
    #     duration = 20

    if(duration >= audio_file_duration):
        print("Warning: Duration you specified %s-seconds must equal or smaller than the audio file duration %ss" % (duration, audio_file_duration))
        duration = round_up_duration(audio_file_duration)
        print("Set new duration as %s-seconds" % duration)

    # duration = round_up_duration(duration)

    latent_diffusion = set_cond_text(latent_diffusion)

    if config is not None:
        assert type(config) is str
        config = yaml.load(open(config, "r"), Loader=yaml.FullLoader)
    else:
        config = default_audioldm_config()

    seed_everything(int(seed))
    # latent_diffusion.latent_t_size = duration_to_latent_t_size(duration)
    latent_diffusion.cond_stage_model.embed_mode = "text"

    fn_STFT = TacotronSTFT(
        config["preprocessing"]["stft"]["filter_length"],
        config["preprocessing"]["stft"]["hop_length"],
        config["preprocessing"]["stft"]["win_length"],
        config["preprocessing"]["mel"]["n_mel_channels"],
        config["preprocessing"]["audio"]["sampling_rate"],
        config["preprocessing"]["mel"]["mel_fmin"],
        config["preprocessing"]["mel"]["mel_fmax"],
    )
mel, _, _ = wav_to_fbank(
7
2023-11-14 23:29:31+00:00
24k
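The gold continuation recorded in the next_line field above is the opening of a wav_to_fbank call inside style_transfer. Below is a minimal sketch of how that call might complete, based only on the wav_to_fbank(filename, target_length=1024, fn_STFT=None) signature shown in the context snippets and the duration / fn_STFT variables defined in the code above; the argument choices are an assumption for illustration, not part of this record.

# Hypothetical completion of the gold next_line (not part of this record);
# arguments are assumed from the wav_to_fbank signature and the variables
# defined in the style_transfer code above.
mel, _, _ = wav_to_fbank(
    original_audio_file_path,
    target_length=int(duration * 102.4),  # assumed: same 102.4 frames-per-second factor used in read_wav_file above
    fn_STFT=fn_STFT,
)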
BraveGroup/Drive-WM
src/diffusers/models/unet_3d_blocks.py
[ { "identifier": "is_torch_version", "path": "src/diffusers/utils/import_utils.py", "snippet": "def is_torch_version(operation: str, version: str):\n \"\"\"\n Args:\n Compares the current PyTorch version to a given reference with an operation.\n operation (`str`):\n A string representation of an operator, such as `\">\"` or `\"<=\"`\n version (`str`):\n A string version of PyTorch\n \"\"\"\n return compare_versions(parse(_torch_version), operation, version)" }, { "identifier": "apply_freeu", "path": "src/diffusers/utils/torch_utils.py", "snippet": "def apply_freeu(\n resolution_idx: int, hidden_states: torch.Tensor, res_hidden_states: torch.Tensor, **freeu_kwargs\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Applies the FreeU mechanism as introduced in https:\n //arxiv.org/abs/2309.11497. Adapted from the official code repository: https://github.com/ChenyangSi/FreeU.\n\n Args:\n resolution_idx (`int`): Integer denoting the UNet block where FreeU is being applied.\n hidden_states (`torch.Tensor`): Inputs to the underlying block.\n res_hidden_states (`torch.Tensor`): Features from the skip block corresponding to the underlying block.\n s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features.\n s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features.\n b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.\n b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.\n \"\"\"\n if resolution_idx == 0:\n num_half_channels = hidden_states.shape[1] // 2\n hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs[\"b1\"]\n res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs[\"s1\"])\n if resolution_idx == 1:\n num_half_channels = hidden_states.shape[1] // 2\n hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs[\"b2\"]\n res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs[\"s2\"])\n\n return hidden_states, res_hidden_states" }, { "identifier": "DualTransformer2DModel", "path": "src/diffusers/models/dual_transformer_2d.py", "snippet": "class DualTransformer2DModel(nn.Module):\n \"\"\"\n Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.\n attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.\n in_channels (`int`, *optional*):\n Pass if the input is continuous. The number of channels in the input and output.\n num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.\n dropout (`float`, *optional*, defaults to 0.1): The dropout probability to use.\n cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.\n sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.\n Note that this is fixed at training time as it is used for learning a number of position embeddings. See\n `ImagePositionalEmbeddings`.\n num_vector_embeds (`int`, *optional*):\n Pass if the input is discrete. 
The number of classes of the vector embeddings of the latent pixels.\n Includes the class for the masked latent pixel.\n activation_fn (`str`, *optional*, defaults to `\"geglu\"`): Activation function to be used in feed-forward.\n num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.\n The number of diffusion steps used during training. Note that this is fixed at training time as it is used\n to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for\n up to but not more than steps than `num_embeds_ada_norm`.\n attention_bias (`bool`, *optional*):\n Configure if the TransformerBlocks' attention should contain a bias parameter.\n \"\"\"\n\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n sample_size: Optional[int] = None,\n num_vector_embeds: Optional[int] = None,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n ):\n super().__init__()\n self.transformers = nn.ModuleList(\n [\n Transformer2DModel(\n num_attention_heads=num_attention_heads,\n attention_head_dim=attention_head_dim,\n in_channels=in_channels,\n num_layers=num_layers,\n dropout=dropout,\n norm_num_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attention_bias=attention_bias,\n sample_size=sample_size,\n num_vector_embeds=num_vector_embeds,\n activation_fn=activation_fn,\n num_embeds_ada_norm=num_embeds_ada_norm,\n )\n for _ in range(2)\n ]\n )\n\n # Variables that can be set by a pipeline:\n\n # The ratio of transformer1 to transformer2's output states to be combined during inference\n self.mix_ratio = 0.5\n\n # The shape of `encoder_hidden_states` is expected to be\n # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`\n self.condition_lengths = [77, 257]\n\n # Which transformer to use to encode which condition.\n # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`\n self.transformer_index_for_condition = [1, 0]\n\n def forward(\n self,\n hidden_states,\n encoder_hidden_states,\n timestep=None,\n attention_mask=None,\n cross_attention_kwargs=None,\n return_dict: bool = True,\n ):\n \"\"\"\n Args:\n hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`.\n When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input\n hidden_states.\n encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):\n Conditional embeddings for cross attention layer. If not given, cross-attention defaults to\n self-attention.\n timestep ( `torch.long`, *optional*):\n Optional timestep to be applied as an embedding in AdaLayerNorm's. 
Used to indicate denoising step.\n attention_mask (`torch.FloatTensor`, *optional*):\n Optional attention mask to be applied in Attention.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`:\n [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n input_states = hidden_states\n\n encoded_states = []\n tokens_start = 0\n # attention_mask is not used yet\n for i in range(2):\n # for each of the two transformers, pass the corresponding condition tokens\n condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]\n transformer_index = self.transformer_index_for_condition[i]\n encoded_state = self.transformers[transformer_index](\n input_states,\n encoder_hidden_states=condition_state,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False,\n )[0]\n encoded_states.append(encoded_state - input_states)\n tokens_start += self.condition_lengths[i]\n\n output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)\n output_states = output_states + input_states\n\n if not return_dict:\n return (output_states,)\n\n return Transformer2DModelOutput(sample=output_states)" }, { "identifier": "Downsample2D", "path": "src/diffusers/models/resnet.py", "snippet": "class Downsample2D(nn.Module):\n \"\"\"A 2D downsampling layer with an optional convolution.\n\n Parameters:\n channels (`int`):\n number of channels in the inputs and outputs.\n use_conv (`bool`, default `False`):\n option to use a convolution.\n out_channels (`int`, optional):\n number of output channels. 
Defaults to `channels`.\n padding (`int`, default `1`):\n padding for the convolution.\n name (`str`, default `conv`):\n name of the downsampling 2D layer.\n \"\"\"\n\n def __init__(\n self,\n channels: int,\n use_conv: bool = False,\n out_channels: Optional[int] = None,\n padding: int = 1,\n name: str = \"conv\",\n ):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.padding = padding\n stride = 2\n self.name = name\n conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv\n\n if use_conv:\n conv = conv_cls(self.channels, self.out_channels, 3, stride=stride, padding=padding)\n else:\n assert self.channels == self.out_channels\n conv = nn.AvgPool2d(kernel_size=stride, stride=stride)\n\n # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed\n if name == \"conv\":\n self.Conv2d_0 = conv\n self.conv = conv\n elif name == \"Conv2d_0\":\n self.conv = conv\n else:\n self.conv = conv\n\n def forward(self, hidden_states: torch.FloatTensor, scale: float = 1.0) -> torch.FloatTensor:\n assert hidden_states.shape[1] == self.channels\n\n if self.use_conv and self.padding == 0:\n pad = (0, 1, 0, 1)\n hidden_states = F.pad(hidden_states, pad, mode=\"constant\", value=0)\n\n assert hidden_states.shape[1] == self.channels\n\n if not USE_PEFT_BACKEND:\n if isinstance(self.conv, LoRACompatibleConv):\n hidden_states = self.conv(hidden_states, scale)\n else:\n hidden_states = self.conv(hidden_states)\n else:\n hidden_states = self.conv(hidden_states)\n\n return hidden_states" }, { "identifier": "ResnetBlock2D", "path": "src/diffusers/models/resnet.py", "snippet": "class ResnetBlock2D(nn.Module):\n r\"\"\"\n A Resnet block.\n\n Parameters:\n in_channels (`int`): The number of channels in the input.\n out_channels (`int`, *optional*, default to be `None`):\n The number of output channels for the first conv2d layer. If None, same as `in_channels`.\n dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use.\n temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding.\n groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer.\n groups_out (`int`, *optional*, default to None):\n The number of groups to use for the second normalization layer. if set to None, same as `groups`.\n eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization.\n non_linearity (`str`, *optional*, default to `\"swish\"`): the activation function to use.\n time_embedding_norm (`str`, *optional*, default to `\"default\"` ): Time scale shift config.\n By default, apply timestep embedding conditioning with a simple shift mechanism. 
Choose \"scale_shift\" or\n \"ada_group\" for a stronger conditioning with scale and shift.\n kernel (`torch.FloatTensor`, optional, default to None): FIR filter, see\n [`~models.resnet.FirUpsample2D`] and [`~models.resnet.FirDownsample2D`].\n output_scale_factor (`float`, *optional*, default to be `1.0`): the scale factor to use for the output.\n use_in_shortcut (`bool`, *optional*, default to `True`):\n If `True`, add a 1x1 nn.conv2d layer for skip-connection.\n up (`bool`, *optional*, default to `False`): If `True`, add an upsample layer.\n down (`bool`, *optional*, default to `False`): If `True`, add a downsample layer.\n conv_shortcut_bias (`bool`, *optional*, default to `True`): If `True`, adds a learnable bias to the\n `conv_shortcut` output.\n conv_2d_out_channels (`int`, *optional*, default to `None`): the number of channels in the output.\n If None, same as `out_channels`.\n \"\"\"\n\n def __init__(\n self,\n *,\n in_channels: int,\n out_channels: Optional[int] = None,\n conv_shortcut: bool = False,\n dropout: float = 0.0,\n temb_channels: int = 512,\n groups: int = 32,\n groups_out: Optional[int] = None,\n pre_norm: bool = True,\n eps: float = 1e-6,\n non_linearity: str = \"swish\",\n skip_time_act: bool = False,\n time_embedding_norm: str = \"default\", # default, scale_shift, ada_group, spatial\n kernel: Optional[torch.FloatTensor] = None,\n output_scale_factor: float = 1.0,\n use_in_shortcut: Optional[bool] = None,\n up: bool = False,\n down: bool = False,\n conv_shortcut_bias: bool = True,\n conv_2d_out_channels: Optional[int] = None,\n ):\n super().__init__()\n self.pre_norm = pre_norm\n self.pre_norm = True\n self.in_channels = in_channels\n out_channels = in_channels if out_channels is None else out_channels\n self.out_channels = out_channels\n self.use_conv_shortcut = conv_shortcut\n self.up = up\n self.down = down\n self.output_scale_factor = output_scale_factor\n self.time_embedding_norm = time_embedding_norm\n self.skip_time_act = skip_time_act\n\n linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear\n conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv\n\n if groups_out is None:\n groups_out = groups\n\n if self.time_embedding_norm == \"ada_group\":\n self.norm1 = AdaGroupNorm(temb_channels, in_channels, groups, eps=eps)\n elif self.time_embedding_norm == \"spatial\":\n self.norm1 = SpatialNorm(in_channels, temb_channels)\n else:\n self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)\n\n self.conv1 = conv_cls(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n\n if temb_channels is not None:\n if self.time_embedding_norm == \"default\":\n self.time_emb_proj = linear_cls(temb_channels, out_channels)\n elif self.time_embedding_norm == \"scale_shift\":\n self.time_emb_proj = linear_cls(temb_channels, 2 * out_channels)\n elif self.time_embedding_norm == \"ada_group\" or self.time_embedding_norm == \"spatial\":\n self.time_emb_proj = None\n else:\n raise ValueError(f\"unknown time_embedding_norm : {self.time_embedding_norm} \")\n else:\n self.time_emb_proj = None\n\n if self.time_embedding_norm == \"ada_group\":\n self.norm2 = AdaGroupNorm(temb_channels, out_channels, groups_out, eps=eps)\n elif self.time_embedding_norm == \"spatial\":\n self.norm2 = SpatialNorm(out_channels, temb_channels)\n else:\n self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)\n\n self.dropout = torch.nn.Dropout(dropout)\n conv_2d_out_channels = 
conv_2d_out_channels or out_channels\n self.conv2 = conv_cls(out_channels, conv_2d_out_channels, kernel_size=3, stride=1, padding=1)\n\n self.nonlinearity = get_activation(non_linearity)\n\n self.upsample = self.downsample = None\n if self.up:\n if kernel == \"fir\":\n fir_kernel = (1, 3, 3, 1)\n self.upsample = lambda x: upsample_2d(x, kernel=fir_kernel)\n elif kernel == \"sde_vp\":\n self.upsample = partial(F.interpolate, scale_factor=2.0, mode=\"nearest\")\n else:\n self.upsample = Upsample2D(in_channels, use_conv=False)\n elif self.down:\n if kernel == \"fir\":\n fir_kernel = (1, 3, 3, 1)\n self.downsample = lambda x: downsample_2d(x, kernel=fir_kernel)\n elif kernel == \"sde_vp\":\n self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2)\n else:\n self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name=\"op\")\n\n self.use_in_shortcut = self.in_channels != conv_2d_out_channels if use_in_shortcut is None else use_in_shortcut\n\n self.conv_shortcut = None\n if self.use_in_shortcut:\n self.conv_shortcut = conv_cls(\n in_channels, conv_2d_out_channels, kernel_size=1, stride=1, padding=0, bias=conv_shortcut_bias\n )\n\n def forward(\n self, input_tensor: torch.FloatTensor, temb: torch.FloatTensor, scale: float = 1.0\n ) -> torch.FloatTensor:\n hidden_states = input_tensor\n\n if self.time_embedding_norm == \"ada_group\" or self.time_embedding_norm == \"spatial\":\n hidden_states = self.norm1(hidden_states, temb)\n else:\n hidden_states = self.norm1(hidden_states)\n\n hidden_states = self.nonlinearity(hidden_states)\n\n if self.upsample is not None:\n # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984\n if hidden_states.shape[0] >= 64:\n input_tensor = input_tensor.contiguous()\n hidden_states = hidden_states.contiguous()\n input_tensor = (\n self.upsample(input_tensor, scale=scale)\n if isinstance(self.upsample, Upsample2D)\n else self.upsample(input_tensor)\n )\n hidden_states = (\n self.upsample(hidden_states, scale=scale)\n if isinstance(self.upsample, Upsample2D)\n else self.upsample(hidden_states)\n )\n elif self.downsample is not None:\n input_tensor = (\n self.downsample(input_tensor, scale=scale)\n if isinstance(self.downsample, Downsample2D)\n else self.downsample(input_tensor)\n )\n hidden_states = (\n self.downsample(hidden_states, scale=scale)\n if isinstance(self.downsample, Downsample2D)\n else self.downsample(hidden_states)\n )\n\n hidden_states = self.conv1(hidden_states, scale) if not USE_PEFT_BACKEND else self.conv1(hidden_states)\n\n if self.time_emb_proj is not None:\n if not self.skip_time_act:\n temb = self.nonlinearity(temb)\n temb = (\n self.time_emb_proj(temb, scale)[:, :, None, None]\n if not USE_PEFT_BACKEND\n else self.time_emb_proj(temb)[:, :, None, None]\n )\n\n if temb is not None and self.time_embedding_norm == \"default\":\n hidden_states = hidden_states + temb\n\n if self.time_embedding_norm == \"ada_group\" or self.time_embedding_norm == \"spatial\":\n hidden_states = self.norm2(hidden_states, temb)\n else:\n hidden_states = self.norm2(hidden_states)\n\n if temb is not None and self.time_embedding_norm == \"scale_shift\":\n scale, shift = torch.chunk(temb, 2, dim=1)\n hidden_states = hidden_states * (1 + scale) + shift\n\n hidden_states = self.nonlinearity(hidden_states)\n\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.conv2(hidden_states, scale) if not USE_PEFT_BACKEND else self.conv2(hidden_states)\n\n if self.conv_shortcut is not None:\n 
input_tensor = (\n self.conv_shortcut(input_tensor, scale) if not USE_PEFT_BACKEND else self.conv_shortcut(input_tensor)\n )\n\n output_tensor = (input_tensor + hidden_states) / self.output_scale_factor\n\n return output_tensor" }, { "identifier": "TemporalConvLayer", "path": "src/diffusers/models/resnet.py", "snippet": "class TemporalConvLayer(nn.Module):\n \"\"\"\n Temporal convolutional layer that can be used for video (sequence of images) input Code mostly copied from:\n https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/models/multi_modal/video_synthesis/unet_sd.py#L1016\n\n Parameters:\n in_dim (`int`): Number of input channels.\n out_dim (`int`): Number of output channels.\n dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use.\n \"\"\"\n\n def __init__(self, in_dim: int, out_dim: Optional[int] = None, dropout: float = 0.0, norm_num_groups: int = 32):\n super().__init__()\n out_dim = out_dim or in_dim\n self.in_dim = in_dim\n self.out_dim = out_dim\n\n # conv layers\n self.conv1 = nn.Sequential(\n nn.GroupNorm(norm_num_groups, in_dim), nn.SiLU(), nn.Conv3d(in_dim, out_dim, (3, 1, 1), padding=(1, 0, 0))\n )\n self.conv2 = nn.Sequential(\n nn.GroupNorm(norm_num_groups, out_dim),\n nn.SiLU(),\n nn.Dropout(dropout),\n nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)),\n )\n self.conv3 = nn.Sequential(\n nn.GroupNorm(norm_num_groups, out_dim),\n nn.SiLU(),\n nn.Dropout(dropout),\n nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)),\n )\n self.conv4 = nn.Sequential(\n nn.GroupNorm(norm_num_groups, out_dim),\n nn.SiLU(),\n nn.Dropout(dropout),\n nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)),\n )\n\n # zero out the last layer params,so the conv block is identity\n nn.init.zeros_(self.conv4[-1].weight)\n nn.init.zeros_(self.conv4[-1].bias)\n\n def forward(self, hidden_states: torch.Tensor, num_frames: int = 1) -> torch.Tensor:\n hidden_states = (\n hidden_states[None, :].reshape((-1, num_frames) + hidden_states.shape[1:]).permute(0, 2, 1, 3, 4)\n )\n\n identity = hidden_states\n hidden_states = self.conv1(hidden_states)\n hidden_states = self.conv2(hidden_states)\n hidden_states = self.conv3(hidden_states)\n hidden_states = self.conv4(hidden_states)\n\n hidden_states = identity + hidden_states\n\n hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape(\n (hidden_states.shape[0] * hidden_states.shape[2], -1) + hidden_states.shape[3:]\n )\n return hidden_states" }, { "identifier": "Upsample2D", "path": "src/diffusers/models/resnet.py", "snippet": "class Upsample2D(nn.Module):\n \"\"\"A 2D upsampling layer with an optional convolution.\n\n Parameters:\n channels (`int`):\n number of channels in the inputs and outputs.\n use_conv (`bool`, default `False`):\n option to use a convolution.\n use_conv_transpose (`bool`, default `False`):\n option to use a convolution transpose.\n out_channels (`int`, optional):\n number of output channels. 
Defaults to `channels`.\n name (`str`, default `conv`):\n name of the upsampling 2D layer.\n \"\"\"\n\n def __init__(\n self,\n channels: int,\n use_conv: bool = False,\n use_conv_transpose: bool = False,\n out_channels: Optional[int] = None,\n name: str = \"conv\",\n ):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.use_conv_transpose = use_conv_transpose\n self.name = name\n conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv\n\n conv = None\n if use_conv_transpose:\n conv = nn.ConvTranspose2d(channels, self.out_channels, 4, 2, 1)\n elif use_conv:\n conv = conv_cls(self.channels, self.out_channels, 3, padding=1)\n\n # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed\n if name == \"conv\":\n self.conv = conv\n else:\n self.Conv2d_0 = conv\n\n def forward(\n self, hidden_states: torch.FloatTensor, output_size: Optional[int] = None, scale: float = 1.0\n ) -> torch.FloatTensor:\n assert hidden_states.shape[1] == self.channels\n\n if self.use_conv_transpose:\n return self.conv(hidden_states)\n\n # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16\n # TODO(Suraj): Remove this cast once the issue is fixed in PyTorch\n # https://github.com/pytorch/pytorch/issues/86679\n dtype = hidden_states.dtype\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(torch.float32)\n\n # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984\n if hidden_states.shape[0] >= 64:\n hidden_states = hidden_states.contiguous()\n\n # if `output_size` is passed we force the interpolation output\n # size and do not make use of `scale_factor=2`\n if output_size is None:\n hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode=\"nearest\")\n else:\n hidden_states = F.interpolate(hidden_states, size=output_size, mode=\"nearest\")\n\n # If the input is bfloat16, we cast back to bfloat16\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(dtype)\n\n # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed\n if self.use_conv:\n if self.name == \"conv\":\n if isinstance(self.conv, LoRACompatibleConv) and not USE_PEFT_BACKEND:\n hidden_states = self.conv(hidden_states, scale)\n else:\n hidden_states = self.conv(hidden_states)\n else:\n if isinstance(self.Conv2d_0, LoRACompatibleConv) and not USE_PEFT_BACKEND:\n hidden_states = self.Conv2d_0(hidden_states, scale)\n else:\n hidden_states = self.Conv2d_0(hidden_states)\n\n return hidden_states" }, { "identifier": "Transformer2DModel", "path": "src/diffusers/models/transformer_2d.py", "snippet": "class Transformer2DModel(ModelMixin, ConfigMixin):\n \"\"\"\n A 2D Transformer model for image-like data.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.\n attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.\n in_channels (`int`, *optional*):\n The number of channels in the input and output (specify if the input is **continuous**).\n num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.\n sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).\n This 
is fixed during training since it is used to learn a number of position embeddings.\n num_vector_embeds (`int`, *optional*):\n The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**).\n Includes the class for the masked latent pixel.\n activation_fn (`str`, *optional*, defaults to `\"geglu\"`): Activation function to use in feed-forward.\n num_embeds_ada_norm ( `int`, *optional*):\n The number of diffusion steps used during training. Pass if at least one of the norm_layers is\n `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are\n added to the hidden states.\n\n During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`.\n attention_bias (`bool`, *optional*):\n Configure if the `TransformerBlocks` attention should contain a bias parameter.\n \"\"\"\n\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n sample_size: Optional[int] = None,\n num_vector_embeds: Optional[int] = None,\n patch_size: Optional[int] = None,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n use_linear_projection: bool = False,\n only_cross_attention: bool = False,\n double_self_attention: bool = False,\n upcast_attention: bool = False,\n norm_type: str = \"layer_norm\",\n norm_elementwise_affine: bool = True,\n norm_eps: float = 1e-5,\n attention_type: str = \"default\",\n caption_channels: int = None,\n ):\n super().__init__()\n self.use_linear_projection = use_linear_projection\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv\n linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear\n\n # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)`\n # Define whether input is continuous or discrete depending on configuration\n self.is_input_continuous = (in_channels is not None) and (patch_size is None)\n self.is_input_vectorized = num_vector_embeds is not None\n self.is_input_patches = in_channels is not None and patch_size is not None\n\n if norm_type == \"layer_norm\" and num_embeds_ada_norm is not None:\n deprecation_message = (\n f\"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or\"\n \" incorrectly set to `'layer_norm'`.Make sure to set `norm_type` to `'ada_norm'` in the config.\"\n \" Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect\"\n \" results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it\"\n \" would be very nice if you could open a Pull request for the `transformer/config.json` file\"\n )\n deprecate(\"norm_type!=num_embeds_ada_norm\", \"1.0.0\", deprecation_message, standard_warn=False)\n norm_type = \"ada_norm\"\n\n if self.is_input_continuous and self.is_input_vectorized:\n raise ValueError(\n f\"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. 
Make\"\n \" sure that either `in_channels` or `num_vector_embeds` is None.\"\n )\n elif self.is_input_vectorized and self.is_input_patches:\n raise ValueError(\n f\"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make\"\n \" sure that either `num_vector_embeds` or `num_patches` is None.\"\n )\n elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches:\n raise ValueError(\n f\"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:\"\n f\" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None.\"\n )\n\n # 2. Define input layers\n if self.is_input_continuous:\n self.in_channels = in_channels\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n if use_linear_projection:\n self.proj_in = linear_cls(in_channels, inner_dim)\n else:\n self.proj_in = conv_cls(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)\n elif self.is_input_vectorized:\n assert sample_size is not None, \"Transformer2DModel over discrete input must provide sample_size\"\n assert num_vector_embeds is not None, \"Transformer2DModel over discrete input must provide num_embed\"\n\n self.height = sample_size\n self.width = sample_size\n self.num_vector_embeds = num_vector_embeds\n self.num_latent_pixels = self.height * self.width\n\n self.latent_image_embedding = ImagePositionalEmbeddings(\n num_embed=num_vector_embeds, embed_dim=inner_dim, height=self.height, width=self.width\n )\n elif self.is_input_patches:\n assert sample_size is not None, \"Transformer2DModel over patched input must provide sample_size\"\n\n self.height = sample_size\n self.width = sample_size\n\n self.patch_size = patch_size\n interpolation_scale = self.config.sample_size // 64 # => 64 (= 512 pixart) has interpolation scale 1\n interpolation_scale = max(interpolation_scale, 1)\n self.pos_embed = PatchEmbed(\n height=sample_size,\n width=sample_size,\n patch_size=patch_size,\n in_channels=in_channels,\n embed_dim=inner_dim,\n interpolation_scale=interpolation_scale,\n )\n\n # 3. Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n num_embeds_ada_norm=num_embeds_ada_norm,\n attention_bias=attention_bias,\n only_cross_attention=only_cross_attention,\n double_self_attention=double_self_attention,\n upcast_attention=upcast_attention,\n norm_type=norm_type,\n norm_elementwise_affine=norm_elementwise_affine,\n norm_eps=norm_eps,\n attention_type=attention_type,\n )\n for d in range(num_layers)\n ]\n )\n\n # 4. 
Define output layers\n self.out_channels = in_channels if out_channels is None else out_channels\n if self.is_input_continuous:\n # TODO: should use out_channels for continuous projections\n if use_linear_projection:\n self.proj_out = linear_cls(inner_dim, in_channels)\n else:\n self.proj_out = conv_cls(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)\n elif self.is_input_vectorized:\n self.norm_out = nn.LayerNorm(inner_dim)\n self.out = nn.Linear(inner_dim, self.num_vector_embeds - 1)\n elif self.is_input_patches and norm_type != \"ada_norm_single\":\n self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)\n self.proj_out_1 = nn.Linear(inner_dim, 2 * inner_dim)\n self.proj_out_2 = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)\n elif self.is_input_patches and norm_type == \"ada_norm_single\":\n self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)\n self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5)\n self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)\n\n # 5. PixArt-Alpha blocks.\n self.adaln_single = None\n self.use_additional_conditions = False\n if norm_type == \"ada_norm_single\":\n self.use_additional_conditions = self.config.sample_size == 128\n # TODO(Sayak, PVP) clean this, for now we use sample size to determine whether to use\n # additional conditions until we find better name\n self.adaln_single = AdaLayerNormSingle(inner_dim, use_additional_conditions=self.use_additional_conditions)\n\n self.caption_projection = None\n if caption_channels is not None:\n self.caption_projection = CaptionProjection(in_features=caption_channels, hidden_size=inner_dim)\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n added_cond_kwargs: Dict[str, torch.Tensor] = None,\n class_labels: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n attention_mask: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ):\n \"\"\"\n The [`Transformer2DModel`] forward method.\n\n Args:\n hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):\n Input `hidden_states`.\n encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):\n Conditional embeddings for cross attention layer. If not given, cross-attention defaults to\n self-attention.\n timestep ( `torch.LongTensor`, *optional*):\n Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.\n class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):\n Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in\n `AdaLayerZeroNorm`.\n cross_attention_kwargs ( `Dict[str, Any]`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).\n attention_mask ( `torch.Tensor`, *optional*):\n An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. 
If `1` the mask\n is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large\n negative values to the attention scores corresponding to \"discard\" tokens.\n encoder_attention_mask ( `torch.Tensor`, *optional*):\n Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:\n\n * Mask `(batch, sequence_length)` True = keep, False = discard.\n * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.\n\n If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format\n above. This bias will be added to the cross-attention scores.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain\n tuple.\n\n Returns:\n If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a\n `tuple` where the first element is the sample tensor.\n \"\"\"\n # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.\n # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.\n # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.\n # expects mask of shape:\n # [batch, key_tokens]\n # adds singleton query_tokens dimension:\n # [batch, 1, key_tokens]\n # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:\n # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)\n # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)\n if attention_mask is not None and attention_mask.ndim == 2:\n # assume that mask is expressed as:\n # (1 = keep, 0 = discard)\n # convert mask into a bias that can be added to attention scores:\n # (keep = +0, discard = -10000.0)\n attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # convert encoder_attention_mask to a bias the same way we do for attention_mask\n if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:\n encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0\n encoder_attention_mask = encoder_attention_mask.unsqueeze(1)\n\n # Retrieve lora scale.\n lora_scale = cross_attention_kwargs.get(\"scale\", 1.0) if cross_attention_kwargs is not None else 1.0\n\n # 1. 
Input\n if self.is_input_continuous:\n batch, _, height, width = hidden_states.shape\n residual = hidden_states\n\n hidden_states = self.norm(hidden_states)\n if not self.use_linear_projection:\n hidden_states = (\n self.proj_in(hidden_states, scale=lora_scale)\n if not USE_PEFT_BACKEND\n else self.proj_in(hidden_states)\n )\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)\n else:\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)\n hidden_states = (\n self.proj_in(hidden_states, scale=lora_scale)\n if not USE_PEFT_BACKEND\n else self.proj_in(hidden_states)\n )\n\n elif self.is_input_vectorized:\n hidden_states = self.latent_image_embedding(hidden_states)\n elif self.is_input_patches:\n height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size\n hidden_states = self.pos_embed(hidden_states)\n\n if self.adaln_single is not None:\n if self.use_additional_conditions and added_cond_kwargs is None:\n raise ValueError(\n \"`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`.\"\n )\n batch_size = hidden_states.shape[0]\n timestep, embedded_timestep = self.adaln_single(\n timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype\n )\n\n # 2. Blocks\n if self.caption_projection is not None:\n batch_size = hidden_states.shape[0]\n encoder_hidden_states = self.caption_projection(encoder_hidden_states)\n encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1])\n\n for block in self.transformer_blocks:\n if self.training and self.gradient_checkpointing:\n hidden_states = torch.utils.checkpoint.checkpoint(\n block,\n hidden_states,\n attention_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n timestep,\n cross_attention_kwargs,\n class_labels,\n use_reentrant=False,\n )\n else:\n hidden_states = block(\n hidden_states,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n class_labels=class_labels,\n )\n\n # 3. 
Output\n if self.is_input_continuous:\n if not self.use_linear_projection:\n hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()\n hidden_states = (\n self.proj_out(hidden_states, scale=lora_scale)\n if not USE_PEFT_BACKEND\n else self.proj_out(hidden_states)\n )\n else:\n hidden_states = (\n self.proj_out(hidden_states, scale=lora_scale)\n if not USE_PEFT_BACKEND\n else self.proj_out(hidden_states)\n )\n hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()\n\n output = hidden_states + residual\n elif self.is_input_vectorized:\n hidden_states = self.norm_out(hidden_states)\n logits = self.out(hidden_states)\n # (batch, self.num_vector_embeds - 1, self.num_latent_pixels)\n logits = logits.permute(0, 2, 1)\n\n # log(p(x_0))\n output = F.log_softmax(logits.double(), dim=1).float()\n\n if self.is_input_patches:\n if self.config.norm_type != \"ada_norm_single\":\n conditioning = self.transformer_blocks[0].norm1.emb(\n timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)\n hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]\n hidden_states = self.proj_out_2(hidden_states)\n elif self.config.norm_type == \"ada_norm_single\":\n shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)\n hidden_states = self.norm_out(hidden_states)\n # Modulation\n hidden_states = hidden_states * (1 + scale) + shift\n hidden_states = self.proj_out(hidden_states)\n hidden_states = hidden_states.squeeze(1)\n\n # unpatchify\n if self.adaln_single is None:\n height = width = int(hidden_states.shape[1] ** 0.5)\n hidden_states = hidden_states.reshape(\n shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)\n )\n hidden_states = torch.einsum(\"nhwpqc->nchpwq\", hidden_states)\n output = hidden_states.reshape(\n shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)\n )\n\n if not return_dict:\n return (output,)\n\n return Transformer2DModelOutput(sample=output)" }, { "identifier": "TransformerTemporalModel", "path": "src/diffusers/models/transformer_temporal.py", "snippet": "class TransformerTemporalModel(ModelMixin, ConfigMixin):\n \"\"\"\n A Transformer model for video-like data.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.\n attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.\n in_channels (`int`, *optional*):\n The number of channels in the input and output (specify if the input is **continuous**).\n num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.\n attention_bias (`bool`, *optional*):\n Configure if the `TransformerBlock` attention should contain a bias parameter.\n sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).\n This is fixed during training since it is used to learn a number of position embeddings.\n activation_fn (`str`, *optional*, defaults to `\"geglu\"`):\n Activation function to use in feed-forward. 
See `diffusers.models.activations.get_activation` for supported\n activation functions.\n norm_elementwise_affine (`bool`, *optional*):\n Configure if the `TransformerBlock` should use learnable elementwise affine parameters for normalization.\n double_self_attention (`bool`, *optional*):\n Configure if each `TransformerBlock` should contain two self-attention layers.\n positional_embeddings: (`str`, *optional*):\n The type of positional embeddings to apply to the sequence input before passing use.\n num_positional_embeddings: (`int`, *optional*):\n The maximum length of the sequence over which to apply positional embeddings.\n \"\"\"\n\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n sample_size: Optional[int] = None,\n activation_fn: str = \"geglu\",\n norm_elementwise_affine: bool = True,\n double_self_attention: bool = True,\n positional_embeddings: Optional[str] = None,\n num_positional_embeddings: Optional[int] = None,\n ):\n super().__init__()\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n self.in_channels = in_channels\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n # 3. Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n attention_bias=attention_bias,\n double_self_attention=double_self_attention,\n norm_elementwise_affine=norm_elementwise_affine,\n positional_embeddings=positional_embeddings,\n num_positional_embeddings=num_positional_embeddings,\n )\n for d in range(num_layers)\n ]\n )\n\n self.proj_out = nn.Linear(inner_dim, in_channels)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n encoder_hidden_states: Optional[torch.LongTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n class_labels: torch.LongTensor = None,\n num_frames: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> TransformerTemporalModelOutput:\n \"\"\"\n The [`TransformerTemporal`] forward method.\n\n Args:\n hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):\n Input hidden_states.\n encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):\n Conditional embeddings for cross attention layer. If not given, cross-attention defaults to\n self-attention.\n timestep ( `torch.LongTensor`, *optional*):\n Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.\n class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):\n Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in\n `AdaLayerZeroNorm`.\n num_frames (`int`, *optional*, defaults to 1):\n The number of frames to be processed per batch. 
This is used to reshape the hidden states.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain\n tuple.\n\n Returns:\n [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:\n If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is\n returned, otherwise a `tuple` where the first element is the sample tensor.\n \"\"\"\n # 1. Input\n batch_frames, channel, height, width = hidden_states.shape\n batch_size = batch_frames // num_frames\n\n residual = hidden_states\n\n hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)\n hidden_states = hidden_states.permute(0, 2, 1, 3, 4)\n\n hidden_states = self.norm(hidden_states)\n hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)\n\n hidden_states = self.proj_in(hidden_states)\n\n # 2. Blocks\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n class_labels=class_labels,\n )\n\n # 3. Output\n hidden_states = self.proj_out(hidden_states)\n hidden_states = (\n hidden_states[None, None, :]\n .reshape(batch_size, height, width, num_frames, channel)\n .permute(0, 3, 4, 1, 2)\n .contiguous()\n )\n hidden_states = hidden_states.reshape(batch_frames, channel, height, width)\n\n output = hidden_states + residual\n\n if not return_dict:\n return (output,)\n\n return TransformerTemporalModelOutput(sample=output)" } ]
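The TransformerTemporalModel snippet above runs attention over frames by folding the spatial grid into the batch dimension. Below is a minimal sketch of just that reshape round-trip; the shapes are assumed example values, not taken from the sample.

# Illustrative sketch: the reshape round-trip used before/after temporal attention.
import torch

batch_size, num_frames, channel, height, width = 2, 8, 4, 16, 16
hidden_states = torch.randn(batch_size * num_frames, channel, height, width)

# (batch*frames, C, H, W) -> (batch, frames, C, H, W) -> (batch, C, frames, H, W)
x = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
x = x.permute(0, 2, 1, 3, 4)

# fold H and W into the batch so the transformer sees sequences of frames
x = x.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
assert x.shape == (batch_size * height * width, num_frames, channel)

# the inverse reshape after the transformer blocks restores (batch*frames, C, H, W)
x = x.reshape(batch_size, height, width, num_frames, channel).permute(0, 3, 4, 1, 2)
x = x.reshape(batch_size * num_frames, channel, height, width)
assert x.shape == hidden_states.shape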
from typing import Any, Dict, Optional, Tuple, Union
from torch import nn
from ..utils import is_torch_version
from ..utils.torch_utils import apply_freeu
from .dual_transformer_2d import DualTransformer2DModel
from .resnet import Downsample2D, ResnetBlock2D, TemporalConvLayer, Upsample2D
from .transformer_2d import Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
import torch
15784
resnet_act_fn: str, num_attention_heads: int, resolution_idx: Optional[int] = None, resnet_groups: Optional[int] = None, cross_attention_dim: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = True, only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", temporal_num_attention_heads: int = 8, temporal_cross_attention_dim: Optional[int] = None, temporal_max_seq_length: int = 32, ) -> Union["UpBlock3D", "CrossAttnUpBlock3D", "UpBlockMotion", "CrossAttnUpBlockMotion"]: if up_block_type == "UpBlock3D": return UpBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, ) elif up_block_type == "CrossAttnUpBlock3D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D") return CrossAttnUpBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, ) if up_block_type == "UpBlockMotion": return UpBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) elif up_block_type == "CrossAttnUpBlockMotion": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockMotion") return CrossAttnUpBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) raise ValueError(f"{up_block_type} does not exist.") class UNetMidBlock3DCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, output_scale_factor: float = 1.0, cross_attention_dim: int = 1280, 
dual_cross_attention: bool = False, use_linear_projection: bool = True, upcast_attention: bool = False, ): super().__init__() self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) # there is always at least one resnet resnets = [
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def get_down_block( down_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_downsample: bool, resnet_eps: float, resnet_act_fn: str, num_attention_heads: int, resnet_groups: Optional[int] = None, cross_attention_dim: Optional[int] = None, downsample_padding: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = True, only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", temporal_num_attention_heads: int = 8, temporal_max_seq_length: int = 32, ) -> Union["DownBlock3D", "CrossAttnDownBlock3D", "DownBlockMotion", "CrossAttnDownBlockMotion"]: if down_block_type == "DownBlock3D": return DownBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "CrossAttnDownBlock3D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D") return CrossAttnDownBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, ) if down_block_type == "DownBlockMotion": return DownBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) elif down_block_type == "CrossAttnDownBlockMotion": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockMotion") return CrossAttnDownBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, 
upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) raise ValueError(f"{down_block_type} does not exist.") def get_up_block( up_block_type: str, num_layers: int, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, add_upsample: bool, resnet_eps: float, resnet_act_fn: str, num_attention_heads: int, resolution_idx: Optional[int] = None, resnet_groups: Optional[int] = None, cross_attention_dim: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = True, only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", temporal_num_attention_heads: int = 8, temporal_cross_attention_dim: Optional[int] = None, temporal_max_seq_length: int = 32, ) -> Union["UpBlock3D", "CrossAttnUpBlock3D", "UpBlockMotion", "CrossAttnUpBlockMotion"]: if up_block_type == "UpBlock3D": return UpBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, ) elif up_block_type == "CrossAttnUpBlock3D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D") return CrossAttnUpBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, ) if up_block_type == "UpBlockMotion": return UpBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) elif up_block_type == "CrossAttnUpBlockMotion": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockMotion") return CrossAttnUpBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) raise 
ValueError(f"{up_block_type} does not exist.") class UNetMidBlock3DCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, output_scale_factor: float = 1.0, cross_attention_dim: int = 1280, dual_cross_attention: bool = False, use_linear_projection: bool = True, upcast_attention: bool = False, ): super().__init__() self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) # there is always at least one resnet resnets = [
ResnetBlock2D(
4
2023-11-18 01:40:55+00:00
24k
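Both get_down_block and get_up_block in the sample above follow the same pattern: dispatch on a block-type string and raise ValueError for unknown names. The following is a minimal, self-contained sketch of that pattern; the classes here are placeholders, not the real diffusers blocks.

# Simplified sketch of the string-dispatch factory pattern.
from typing import Any, Callable, Dict


class UpBlock3D:
    def __init__(self, **kwargs: Any) -> None:
        self.config = kwargs  # placeholder: real blocks build resnets/attentions here


class CrossAttnUpBlock3D(UpBlock3D):
    pass


_UP_BLOCKS: Dict[str, Callable[..., UpBlock3D]] = {
    "UpBlock3D": UpBlock3D,
    "CrossAttnUpBlock3D": CrossAttnUpBlock3D,
}


def get_up_block_sketch(up_block_type: str, **kwargs: Any) -> UpBlock3D:
    if up_block_type not in _UP_BLOCKS:
        raise ValueError(f"{up_block_type} does not exist.")
    return _UP_BLOCKS[up_block_type](**kwargs)


block = get_up_block_sketch("CrossAttnUpBlock3D", num_layers=2, in_channels=640)
assert isinstance(block, CrossAttnUpBlock3D)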
wjun0830/CGDETR
cg_detr/train.py
[ { "identifier": "BaseOptions", "path": "cg_detr/config.py", "snippet": "class BaseOptions(object):\n saved_option_filename = \"opt.json\"\n ckpt_filename = \"model.ckpt\"\n tensorboard_log_dir = \"tensorboard_log\"\n train_log_filename = \"train.log.txt\"\n eval_log_filename = \"eval.log.txt\"\n\n def __init__(self):\n self.parser = None\n self.initialized = False\n self.opt = None\n\n def initialize(self):\n self.initialized = True\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dset_name\", type=str, choices=[\"hl\", 'tvsum', 'charadesSTA', 'tacos', 'nlq','youtube_uni'])\n parser.add_argument(\"--dset_domain\", type=str, \n help=\"Domain to train for tvsum dataset. (Only used for tvsum and youtube-hl)\")\n \n parser.add_argument(\"--eval_split_name\", type=str, default=\"val\",\n help=\"should match keys in video_duration_idx_path, must set for VCMR\")\n parser.add_argument(\"--debug\", action=\"store_true\",\n help=\"debug (fast) mode, break all loops, do not load all data into memory.\")\n parser.add_argument(\"--data_ratio\", type=float, default=1.0,\n help=\"how many training and eval data to use. 1.0: use all, 0.1: use 10%.\"\n \"Use small portion for debug purposes. Note this is different from --debug, \"\n \"which works by breaking the loops, typically they are not used together.\")\n parser.add_argument(\"--results_root\", type=str, default=\"results\")\n parser.add_argument(\"--exp_id\", type=str, default=None, help=\"id of this run, required at training\")\n parser.add_argument(\"--seed\", type=int, default=2018, help=\"random seed\")\n parser.add_argument(\"--device\", type=int, default=0, help=\"0 cuda, -1 cpu\")\n parser.add_argument(\"--num_workers\", type=int, default=0,\n help=\"num subprocesses used to load the data, 0: use main process\")\n parser.add_argument(\"--no_pin_memory\", action=\"store_true\",\n help=\"Don't use pin_memory=True for dataloader. 
\"\n \"ref: https://discuss.pytorch.org/t/should-we-set-non-blocking-to-true/38234/4\")\n\n # training config\n parser.add_argument(\"--lr\", type=float, default=1e-4, help=\"learning rate\")\n parser.add_argument(\"--lr_drop\", type=int, default=400, help=\"drop learning rate to 1/10 every lr_drop epochs\")\n parser.add_argument(\"--wd\", type=float, default=1e-4, help=\"weight decay\")\n parser.add_argument(\"--n_epoch\", type=int, default=200, help=\"number of epochs to run\")\n parser.add_argument(\"--max_es_cnt\", type=int, default=200,\n help=\"number of epochs to early stop, use -1 to disable early stop\")\n parser.add_argument(\"--bsz\", type=int, default=32, help=\"mini-batch size\")\n parser.add_argument(\"--eval_bsz\", type=int, default=100,\n help=\"mini-batch size at inference, for query\")\n parser.add_argument(\"--eval_epoch\", type=int, default=5,\n help=\"inference epoch\")\n parser.add_argument(\"--grad_clip\", type=float, default=0.1, help=\"perform gradient clip, -1: disable\")\n parser.add_argument(\"--eval_untrained\", action=\"store_true\", help=\"Evaluate on un-trained model\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"checkpoint path to resume or evaluate, without --resume_all this only load weights\")\n parser.add_argument(\"--resume_all\", action=\"store_true\",\n help=\"if --resume_all, load optimizer/scheduler/epoch as well\")\n parser.add_argument(\"--start_epoch\", type=int, default=None,\n help=\"if None, will be set automatically when using --resume_all\")\n\n # Data config\n parser.add_argument(\"--max_q_l\", type=int, default=-1)\n parser.add_argument(\"--max_v_l\", type=int, default=-1)\n parser.add_argument(\"--clip_length\", type=float, default=2)\n parser.add_argument(\"--max_windows\", type=int, default=5)\n\n parser.add_argument(\"--train_path\", type=str, default=None)\n parser.add_argument(\"--eval_path\", type=str, default=None,\n help=\"Evaluating during training, for Dev set. If None, will only do training, \")\n parser.add_argument(\"--no_norm_vfeat\", action=\"store_true\", help=\"Do not do normalize video feat\")\n parser.add_argument(\"--no_norm_tfeat\", action=\"store_true\", help=\"Do not do normalize text feat\")\n parser.add_argument(\"--v_feat_dirs\", type=str, nargs=\"+\",\n help=\"video feature dirs. If more than one, will concat their features. 
\"\n \"Note that sub ctx features are also accepted here.\")\n parser.add_argument(\"--t_feat_dir\", type=str, help=\"text/query feature dir\")\n parser.add_argument(\"--a_feat_dir\", type=str, help=\"audio feature dir\")\n parser.add_argument(\"--v_feat_dim\", type=int, help=\"video feature dim\")\n parser.add_argument(\"--t_feat_dim\", type=int, help=\"text/query feature dim\")\n parser.add_argument(\"--a_feat_dim\", type=int, help=\"audio feature dim\")\n parser.add_argument(\"--ctx_mode\", type=str, default=\"video_tef\")\n\n # Model config\n parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),\n help=\"Type of positional embedding to use on top of the image features\")\n # * Transformer\n parser.add_argument('--enc_layers', default=3, type=int,\n help=\"Number of encoding layers in the transformer\")\n parser.add_argument('--dec_layers', default=3, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--t2v_layers', default=2, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--sent_layers', default=1, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--moment_layers', default=1, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--dummy_layers', default=2, type=int,\n help=\"Number of encoding layers in the transformer\")\n parser.add_argument('--dim_feedforward', default=1024, type=int,\n help=\"Intermediate size of the feedforward layers in the transformer blocks\")\n parser.add_argument('--hidden_dim', default=256, type=int,\n help=\"Size of the embeddings (dimension of the transformer)\")\n parser.add_argument('--input_dropout', default=0.5, type=float,\n help=\"Dropout applied in input\")\n parser.add_argument('--dropout', default=0.1, type=float,\n help=\"Dropout applied in the transformer\")\n parser.add_argument(\"--txt_drop_ratio\", default=0, type=float,\n help=\"drop txt_drop_ratio tokens from text input. 0.1=10%\")\n parser.add_argument(\"--use_txt_pos\", action=\"store_true\", help=\"use position_embedding for text as well.\")\n parser.add_argument('--nheads', default=8, type=int,\n help=\"Number of attention heads inside the transformer's attentions\")\n parser.add_argument('--num_queries', default=10, type=int,\n help=\"Number of query slots\")\n parser.add_argument('--num_dummies', default=45, type=int,\n help=\"Number of dummy tokens\")\n parser.add_argument('--total_prompts', default=10, type=int,\n help=\"Number of query slots\")\n parser.add_argument('--num_prompts', default=1, type=int,\n help=\"Number of dummy tokens\")\n parser.add_argument('--pre_norm', action='store_true')\n # other model configs\n parser.add_argument(\"--n_input_proj\", type=int, default=2, help=\"#layers to encoder input\")\n parser.add_argument(\"--contrastive_hdim\", type=int, default=64, help=\"dim for contrastive embeddings\")\n parser.add_argument(\"--temperature\", type=float, default=0.07, help=\"temperature nce contrastive_align_loss\")\n # Loss\n\n parser.add_argument(\"--saliency_margin\", type=float, default=0.2)\n parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',\n help=\"Disables auxiliary decoding losses (loss at each layer)\")\n parser.add_argument(\"--span_loss_type\", default=\"l1\", type=str, choices=['l1', 'ce'],\n help=\"l1: (center-x, width) regression. 
ce: (st_idx, ed_idx) classification.\")\n parser.add_argument(\"--contrastive_align_loss\", action=\"store_true\",\n help=\"Disable contrastive_align_loss between matched query spans and the text.\")\n # * Matcher\n parser.add_argument('--set_cost_span', default=10, type=float,\n help=\"L1 span coefficient in the matching cost\")\n parser.add_argument('--set_cost_giou', default=1, type=float,\n help=\"giou span coefficient in the matching cost\")\n parser.add_argument('--set_cost_class', default=4, type=float,\n help=\"Class coefficient in the matching cost\")\n\n # * Loss coefficients\n parser.add_argument(\"--lw_saliency\", type=float, default=1.,\n help=\"weight for saliency loss, set to 0 will ignore\")\n parser.add_argument(\"--lw_wattn\", type=float, default=1.,\n help=\"weight for saliency loss, set to 0 will ignore\")\n parser.add_argument(\"--lw_ms_align\", type=float, default=1.,\n help=\"weight for saliency loss, set to 0 will ignore\")\n parser.add_argument(\"--lw_distill\", type=float, default=1.,\n help=\"weight for saliency loss, set to 0 will ignore\")\n parser.add_argument('--span_loss_coef', default=10, type=float)\n parser.add_argument('--giou_loss_coef', default=1, type=float)\n parser.add_argument('--label_loss_coef', default=4, type=float)\n parser.add_argument('--eos_coef', default=0.1, type=float,\n help=\"Relative classification weight of the no-object class\")\n parser.add_argument(\"--contrastive_align_loss_coef\", default=0.0, type=float)\n\n parser.add_argument(\"--no_sort_results\", action=\"store_true\",\n help=\"do not sort results, use this for moment query visualization\")\n parser.add_argument(\"--max_before_nms\", type=int, default=10)\n parser.add_argument(\"--max_after_nms\", type=int, default=10)\n parser.add_argument(\"--conf_thd\", type=float, default=0.0, help=\"only keep windows with conf >= conf_thd\")\n parser.add_argument(\"--nms_thd\", type=float, default=-1,\n help=\"additionally use non-maximum suppression \"\n \"(or non-minimum suppression for distance)\"\n \"to post-processing the predictions. \"\n \"-1: do not use nms. 
[0, 1]\")\n self.parser = parser\n\n def display_save(self, opt):\n args = vars(opt)\n # Display settings\n print(dict_to_markdown(vars(opt), max_str_len=120))\n # Save settings\n if not isinstance(self, TestOptions):\n option_file_path = os.path.join(opt.results_dir, self.saved_option_filename) # not yaml file indeed\n save_json(args, option_file_path, save_pretty=True)\n\n def parse(self, a_feat_dir=None):\n if not self.initialized:\n self.initialize()\n opt = self.parser.parse_args()\n\n if opt.debug:\n opt.results_root = os.path.sep.join(opt.results_root.split(os.path.sep)[:-1] + [\"debug_results\", ])\n opt.num_workers = 0\n\n if isinstance(self, TestOptions):\n # modify model_dir to absolute path\n # opt.model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"results\", opt.model_dir)\n opt.model_dir = os.path.dirname(opt.resume)\n if a_feat_dir is not None:\n opt.a_feat_dir = a_feat_dir\n saved_options = load_json(os.path.join(opt.model_dir, self.saved_option_filename))\n for arg in saved_options: # use saved options to overwrite all BaseOptions args.\n if arg not in [\"results_root\", \"num_workers\", \"nms_thd\", \"debug\", # \"max_before_nms\", \"max_after_nms\"\n \"max_pred_l\", \"min_pred_l\",\n \"resume\", \"resume_all\", \"no_sort_results\"]:\n setattr(opt, arg, saved_options[arg])\n # opt.no_core_driver = True\n if opt.eval_results_dir is not None:\n opt.results_dir = opt.eval_results_dir\n else:\n if opt.exp_id is None:\n raise ValueError(\"--exp_id is required for at a training option!\")\n\n ctx_str = opt.ctx_mode + \"_sub\" if any([\"sub_ctx\" in p for p in opt.v_feat_dirs]) else opt.ctx_mode\n opt.results_dir = os.path.join(opt.results_root,\n \"-\".join([opt.dset_name, ctx_str, opt.exp_id,\n str(opt.enc_layers) + str(opt.dec_layers) + str(opt.t2v_layers) + str(opt.moment_layers) + str(opt.dummy_layers) + str(opt.sent_layers),\n 'ndum_' + str(opt.num_dummies), 'nprom_' + str(opt.num_prompts) + '_' + str(opt.total_prompts)]))\n mkdirp(opt.results_dir)\n save_fns = ['cg_detr/model.py', 'cg_detr/transformer.py']\n for save_fn in save_fns:\n shutil.copyfile(save_fn, os.path.join(opt.results_dir, os.path.basename(save_fn)))\n\n # save a copy of current code\n code_dir = os.path.dirname(os.path.realpath(__file__))\n code_zip_filename = os.path.join(opt.results_dir, \"code.zip\")\n make_zipfile(code_dir, code_zip_filename,\n enclosing_dir=\"code\",\n exclude_dirs_substring=\"results\",\n exclude_dirs=[\"results\", \"debug_results\", \"__pycache__\"],\n exclude_extensions=[\".pyc\", \".ipynb\", \".swap\"], )\n\n self.display_save(opt)\n\n opt.ckpt_filepath = os.path.join(opt.results_dir, self.ckpt_filename)\n opt.train_log_filepath = os.path.join(opt.results_dir, self.train_log_filename)\n opt.eval_log_filepath = os.path.join(opt.results_dir, self.eval_log_filename)\n opt.tensorboard_log_dir = os.path.join(opt.results_dir, self.tensorboard_log_dir)\n opt.device = torch.device(\"cuda\" if opt.device >= 0 else \"cpu\")\n opt.pin_memory = not opt.no_pin_memory\n\n opt.use_tef = \"tef\" in opt.ctx_mode\n opt.use_video = \"video\" in opt.ctx_mode\n if not opt.use_video:\n opt.v_feat_dim = 0\n if opt.use_tef:\n opt.v_feat_dim += 2\n\n self.opt = opt\n return opt" }, { "identifier": "StartEndDataset", "path": "cg_detr/start_end_dataset.py", "snippet": "class StartEndDataset(Dataset):\n Q_FEAT_TYPES = [\"pooler_output\", \"last_hidden_state\"]\n \"\"\"One line in data loaded from data_path.\"\n {\n \"qid\": 7803,\n \"query\": \"Man in gray top walks from outside to 
inside.\",\n \"duration\": 150,\n \"vid\": \"RoripwjYFp8_360.0_510.0\",\n \"relevant_clip_ids\": [13, 14, 15, 16, 17],\n \"relevant_windows\": [[26, 36]]\n }\n \"\"\"\n\n def __init__(self, dset_name, data_path, v_feat_dirs, q_feat_dir,\n q_feat_type=\"last_hidden_state\",\n max_q_l=32, max_v_l=75, data_ratio=1.0, ctx_mode=\"video\",\n normalize_v=True, normalize_t=True, load_labels=True,\n clip_len=2, max_windows=5, span_loss_type=\"l1\", txt_drop_ratio=0,\n dset_domain=None):\n self.dset_name = dset_name\n self.data_path = data_path\n self.data_ratio = data_ratio\n self.v_feat_dirs = v_feat_dirs \\\n if isinstance(v_feat_dirs, list) else [v_feat_dirs]\n self.q_feat_dir = q_feat_dir\n self.q_feat_type = q_feat_type\n if max_v_l == -1:\n max_v_l = 100000000\n if max_q_l == -1:\n max_q_l = 100\n self.max_q_l = max_q_l\n self.max_v_l = max_v_l\n self.ctx_mode = ctx_mode\n self.use_tef = \"tef\" in ctx_mode\n self.use_video = \"video\" in ctx_mode\n self.normalize_t = normalize_t\n self.normalize_v = normalize_v\n self.load_labels = load_labels\n self.clip_len = clip_len\n self.max_windows = max_windows # maximum number of windows to use as labels\n self.span_loss_type = span_loss_type\n self.txt_drop_ratio = txt_drop_ratio\n if \"val\" in data_path or \"test\" in data_path:\n assert txt_drop_ratio == 0\n\n\n # checks\n assert q_feat_type in self.Q_FEAT_TYPES\n\n # data\n self.data = self.load_data()\n \n # load specific domain data for tvsum dataset\n if self.dset_name in ['tvsum', 'tvsum_sfc']:\n target_domain = dset_domain\n assert target_domain in [\"BK\", \"BT\", \"DS\", \"FM\", \"GA\", \"MS\", \"PK\", \"PR\", \"VT\", \"VU\"]\n\n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data\n \n # load specific domain data for youtube-hl dataset\n if self.dset_name == 'youtube_uni':\n target_domain = dset_domain\n assert target_domain in [\"dog\", \"gymnastics\", \"parkour\", \"skating\", \"skiing\", \"surfing\"]\n \n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data \n \n self.use_glove = False\n self.use_glove = 'vgg' in self.v_feat_dirs[0]\n\n if self.dset_name == 'charadesSTA' and self.use_glove:\n self.vocab = vocab.pretrained_aliases['glove.6B.300d']()\n self.vocab.itos.extend(['<unk>'])\n self.vocab.stoi['<unk>'] = self.vocab.vectors.shape[0]\n self.vocab.vectors = torch.cat(\n (self.vocab.vectors, torch.zeros(1, self.vocab.dim)), dim=0)\n self.embedding = nn.Embedding.from_pretrained(self.vocab.vectors)\n \n\n def load_data(self):\n datalist = load_jsonl(self.data_path)\n if self.data_ratio != 1:\n n_examples = int(len(datalist) * self.data_ratio)\n datalist = datalist[:n_examples]\n logger.info(\"Using {}% of the data: {} examples\"\n .format(self.data_ratio * 100, n_examples))\n return datalist\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n meta = self.data[index]\n\n model_inputs = dict()\n\n if self.use_glove:\n model_inputs[\"query_feat\"] = self.get_query(meta[\"query\"])\n else:\n model_inputs[\"query_feat\"] = self._get_query_feat_by_qid(meta[\"qid\"]) # (Dq, ) or (Lq, Dq)\n \n if self.use_video:\n model_inputs[\"video_feat\"] = self._get_video_feat_by_vid(meta[\"vid\"]) # (Lv, Dv)\n ctx_l = len(model_inputs[\"video_feat\"])\n else:\n ctx_l = self.max_v_l\n\n\n if self.use_tef:\n tef_st = torch.arange(0, ctx_l, 1.0) / ctx_l\n tef_ed = tef_st + 1.0 / ctx_l\n tef = torch.stack([tef_st, tef_ed], dim=1) # (Lv, 2)\n if 
self.use_video:\n model_inputs[\"video_feat\"] = torch.cat(\n [model_inputs[\"video_feat\"], tef], dim=1) # (Lv, Dv+2)\n else:\n model_inputs[\"video_feat\"] = tef\n\n\n if self.dset_name in ['tvsum']:\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_tvsum(meta_label, ctx_l)\n if len(model_inputs[\"saliency_all_labels\"]) != len(model_inputs[\"video_feat\"]):\n model_inputs[\"video_feat\"] = model_inputs[\"video_feat\"][:len(model_inputs[\"saliency_all_labels\"])]\n\n elif self.dset_name == 'youtube_uni':\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_youtube(meta_label, ctx_l)\n else:\n if \"relevant_windows\" in meta: ## For Qvhighlights test set\n model_inputs[\"span_labels\"] = self.get_span_labels(meta[\"relevant_windows\"], ctx_l) # (#windows, 2)\n if self.dset_name in ['charadesSTA', 'tacos', 'activitynet']: ## charades, tacos, nlq\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n elif self.dset_name in ['nlq']:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l, 2) # only one gt\n elif \"subs_train\" not in self.data_path:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all(meta[\"relevant_clip_ids\"], meta[\"saliency_scores\"], ctx_l)\n else:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\n \"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n\n if 'qvhighlight' in self.data_path:\n model_inputs[\"relevant_clip_ids\"] = meta[\"relevant_clip_ids\"]\n model_inputs[\"vid\"] = meta[\"vid\"]\n model_inputs[\"qid\"] = meta[\"qid\"]\n return dict(meta=meta, model_inputs=model_inputs)\n\n def get_query(self, query):\n word_inds = torch.LongTensor(\n [self.vocab.stoi.get(w.lower(), 400000) for w in query.split()])\n return self.embedding(word_inds)\n\n def get_saliency_labels_sub_as_query(self, gt_window, duration, ctx_l, max_n=2):\n clip_len = duration / ctx_l\n gt_st = int(gt_window[0] / clip_len)\n gt_ed = max(0, min(int(gt_window[1] / clip_len), ctx_l) - 1)\n if gt_st > gt_ed:\n gt_st = gt_ed\n\n if gt_st != gt_ed:\n pos_clip_indices = random.sample(range(gt_st, gt_ed + 1), k=max_n)\n else:\n if self.dset_name == 'nlq':\n pos_clip_indices = [gt_st] * 2\n else:\n pos_clip_indices = [gt_st, gt_st]\n\n neg_pool = list(range(0, gt_st)) + list(range(gt_ed+1, ctx_l))\n try:\n neg_clip_indices = random.sample(neg_pool, k=max_n)\n except:\n neg_clip_indices = pos_clip_indices\n\n # For charades_sta\n score_array = np.zeros(ctx_l)\n score_array[gt_st:gt_ed + 1] = 1\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n\n def get_saliency_labels(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n 
\"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = np.argsort(agg_scores) # increasing\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices\n\n def get_saliency_labels_all(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n \"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = np.argsort(agg_scores) # increasing\n\n # score_array = [min(agg_scores[idx], ctx_l-1) for idx in range(ctx_l)]\n score_array = np.zeros(ctx_l)\n for idx in range(len(rel_clip_ids)):\n if rel_clip_ids[idx] >= ctx_l:\n score_array_new = np.zeros(ctx_l + 1)\n score_array_new[:ctx_l] = score_array\n score_array = score_array_new\n score_array[rel_clip_ids[idx]] = agg_scores[idx]\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n 
neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_tvsum(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n agg_scores = np.sum(labels - np.ones_like(labels), axis=-1)[:ctx_l] # start from 1, so minus 1\n score_array = agg_scores / 80 * 12\n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_youtube(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n # Youtube-hl only have binary score\n agg_scores = np.array(labels)[:, 0] # (L, 1) --> (L, )\n score_array = agg_scores * 1\n \n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n \n def get_span_labels(self, windows, ctx_l):\n \"\"\"\n windows: list([st, ed]) in seconds. E.g. 
[[26, 36]], corresponding st_ed clip_indices [[13, 17]] (inclusive)\n Note a maximum of `self.max_windows` windows are used.\n returns Tensor of shape (#windows, 2), each row is [center, width] normalized by video length\n \"\"\"\n if len(windows) > self.max_windows:\n random.shuffle(windows)\n windows = windows[:self.max_windows]\n if self.span_loss_type == \"l1\":\n windows = torch.Tensor(windows) / (ctx_l * self.clip_len) # normalized windows in xx\n windows = span_xx_to_cxw(windows) # normalized windows in cxw\n elif self.span_loss_type == \"ce\":\n windows = torch.Tensor([\n [int(w[0] / self.clip_len), min(int(w[1] / self.clip_len), ctx_l) - 1]\n for w in windows]).long() # inclusive\n else:\n raise NotImplementedError\n return windows\n\n def _get_query_feat_by_qid(self, qid):\n if self.dset_name == 'tvsum':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid))) # 'token', 'text'\n return torch.from_numpy(q_feat['token'])\n # youtube-hl\n elif self.dset_name == 'youtube_uni':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid)))\n return torch.from_numpy(q_feat['last_hidden_state'])\n \n elif self.dset_name in ['tacos', 'nlq']:\n q_feat_path = join(self.q_feat_dir, f\"{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n else:\n # QVhighlight dataset\n q_feat_path = join(self.q_feat_dir, f\"qid{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n return torch.from_numpy(q_feat) # (D, ) or (Lq, D)\n\n def random_drop_rows(self, embeddings):\n \"\"\"randomly mask num_drop rows in embeddings to be zero.\n Args:\n embeddings: np.ndarray (L, D)\n \"\"\"\n num_drop_rows = round(len(embeddings) * self.txt_drop_ratio)\n if num_drop_rows > 0:\n row_indices = np.random.choice(\n len(embeddings), size=num_drop_rows, replace=False)\n embeddings[row_indices] = 0\n return embeddings\n\n def _get_video_feat_by_vid(self, vid):\n if self.dset_name == 'tvsum':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n _feat_path = join(_feat_dir, f\"{vid}_rgb.npy\")\n _feat_rgb = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n\n _feat_path = join(_feat_dir, f\"{vid}_opt.npy\")\n _feat_opt = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n _feat = np.concatenate([_feat_rgb, _feat_opt], axis=-1)\n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n elif self.dset_name == 'youtube_uni':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n # Only single npz files per directory\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n 
v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list] # TODO do we need to cut the length over the min_len?\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n else:\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n return torch.from_numpy(v_feat) # (Lv, D)" }, { "identifier": "start_end_collate", "path": "cg_detr/start_end_dataset.py", "snippet": "def start_end_collate(batch):\n batch_meta = [e[\"meta\"] for e in batch] # seems no need to collate ?\n\n model_inputs_keys = batch[0][\"model_inputs\"].keys()\n batched_data = dict()\n for k in model_inputs_keys:\n if k == \"span_labels\":\n batched_data[k] = [dict(spans=e[\"model_inputs\"][\"span_labels\"]) for e in batch]\n continue\n if k in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n batched_data[k] = torch.LongTensor([e[\"model_inputs\"][k] for e in batch])\n continue\n if k == \"saliency_all_labels\":\n pad_data, mask_data = pad_sequences_1d([e[\"model_inputs\"][k] for e in batch], dtype=np.float32, fixed_length=None)\n batched_data[k] = torch.tensor(pad_data, dtype=torch.float32)\n continue\n if k == 'qid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n if k == 'vid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n batched_data[k] = pad_sequences_1d(\n [e[\"model_inputs\"][k] for e in batch], dtype=torch.float32, fixed_length=None)\n return batch_meta, batched_data" }, { "identifier": "prepare_batch_inputs", "path": "cg_detr/start_end_dataset.py", "snippet": "def prepare_batch_inputs(batched_model_inputs, device, non_blocking=False):\n model_inputs = dict(\n src_txt=batched_model_inputs[\"query_feat\"][0].to(device, non_blocking=non_blocking),\n src_txt_mask=batched_model_inputs[\"query_feat\"][1].to(device, non_blocking=non_blocking),\n src_vid=batched_model_inputs[\"video_feat\"][0].to(device, non_blocking=non_blocking),\n src_vid_mask=batched_model_inputs[\"video_feat\"][1].to(device, non_blocking=non_blocking),\n vid=batched_model_inputs[\"vid\"],\n qid=batched_model_inputs[\"qid\"],\n )\n targets = {}\n\n if \"span_labels\" in batched_model_inputs:\n targets[\"span_labels\"] = [\n dict(spans=e[\"spans\"].to(device, non_blocking=non_blocking))\n for e in batched_model_inputs[\"span_labels\"]\n ]\n if \"saliency_pos_labels\" in batched_model_inputs:\n for name in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n targets[name] = batched_model_inputs[name].to(device, non_blocking=non_blocking)\n\n if \"saliency_all_labels\" in batched_model_inputs:\n targets[\"saliency_all_labels\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets[\"relevant_clips\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets = None if len(targets) == 0 else targets\n return model_inputs, targets" }, { "identifier": "eval_epoch", "path": 
"cg_detr/inference.py", "snippet": "def eval_epoch(model, eval_dataset, opt, save_submission_filename, epoch_i=None, criterion=None, tb_writer=None):\n logger.info(\"Generate submissions\")\n model.eval()\n if criterion is not None and eval_dataset.load_labels:\n criterion.eval()\n else:\n criterion = None\n\n if opt.dset_name == 'tacos':\n shuffle = True\n else:\n shuffle = False\n\n eval_loader = DataLoader(\n eval_dataset,\n collate_fn=start_end_collate,\n batch_size=opt.eval_bsz,\n num_workers=opt.num_workers,\n shuffle=shuffle,\n pin_memory=opt.pin_memory\n )\n\n\n # tvsum \n if opt.dset_name in ['tvsum', 'youtube_uni']:\n metrics, eval_loss_meters = compute_hl_results(model, eval_loader, opt, epoch_i, criterion, tb_writer)\n \n # to match original save format\n submission = [\n {\"brief\": metrics}\n ]\n submission_path = os.path.join(opt.results_dir, \"latest_metric.jsonl\")\n save_jsonl(submission, submission_path)\n\n return submission[0], submission[0], eval_loss_meters, [submission_path]\n\n else:\n submission, eval_loss_meters = get_eval_res(model, eval_loader, opt, epoch_i, criterion, tb_writer)\n\n if opt.dset_name in ['charadesSTA', 'tacos', 'nlq']:\n new_submission = []\n for s in submission:\n s.pop('pred_saliency_scores', None)\n new_submission.append(s)\n submission = new_submission\n\n if opt.no_sort_results:\n save_submission_filename = save_submission_filename.replace(\".jsonl\", \"_unsorted.jsonl\")\n metrics, metrics_nms, latest_file_paths = eval_epoch_post_processing(\n submission, opt, eval_dataset.data, save_submission_filename)\n return metrics, metrics_nms, eval_loss_meters, latest_file_paths" }, { "identifier": "start_inference", "path": "cg_detr/inference.py", "snippet": "def start_inference(train_opt=None, split=None, splitfile=None):\n if train_opt is not None:\n opt = TestOptions().parse(train_opt.a_feat_dir)\n else:\n opt = TestOptions().parse()\n if split is not None:\n opt.eval_split_name = split\n if splitfile is not None:\n opt.eval_path = splitfile\n\n print(opt.eval_split_name)\n print(opt.eval_path)\n logger.info(\"Setup config, data and model...\")\n\n\n cudnn.benchmark = True\n cudnn.deterministic = False\n\n assert opt.eval_path is not None\n if opt.eval_split_name == 'val':\n loadlabel = True\n else:\n loadlabel = False\n\n eval_dataset = StartEndDataset(\n dset_name=opt.dset_name,\n data_path=opt.eval_path,\n v_feat_dirs=opt.v_feat_dirs,\n q_feat_dir=opt.t_feat_dir,\n q_feat_type=\"last_hidden_state\",\n max_q_l=opt.max_q_l,\n max_v_l=opt.max_v_l,\n ctx_mode=opt.ctx_mode,\n data_ratio=opt.data_ratio,\n normalize_v=not opt.no_norm_vfeat,\n normalize_t=not opt.no_norm_tfeat,\n clip_len=opt.clip_length,\n max_windows=opt.max_windows,\n load_labels=loadlabel, # opt.eval_split_name == \"val\",\n span_loss_type=opt.span_loss_type,\n txt_drop_ratio=0,\n dset_domain=opt.dset_domain,\n )\n\n\n\n model, criterion, _, _ = setup_model(opt)\n\n save_submission_filename = \"hl_{}_submission.jsonl\".format(\n opt.eval_split_name)\n # save_submission_filename = \"inference_{}_{}_{}_preds.jsonl\".format(\n # opt.dset_name, opt.eval_split_name, opt.eval_id)\n logger.info(\"Starting inference...\")\n with torch.no_grad():\n metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \\\n eval_epoch(model, eval_dataset, opt, save_submission_filename, criterion=criterion)\n if opt.eval_split_name == 'val':\n logger.info(\"metrics_no_nms {}\".format(pprint.pformat(metrics_no_nms[\"brief\"], indent=4)))\n if metrics_nms is not None:\n 
logger.info(\"metrics_nms {}\".format(pprint.pformat(metrics_nms[\"brief\"], indent=4)))" }, { "identifier": "setup_model", "path": "cg_detr/inference.py", "snippet": "def setup_model(opt):\n \"\"\"setup model/optimizer/scheduler and load checkpoints when needed\"\"\"\n logger.info(\"setup model/optimizer/scheduler\")\n model, criterion = build_model(opt)\n if opt.device.type == \"cuda\":\n logger.info(\"CUDA enabled.\")\n model.to(opt.device)\n criterion.to(opt.device)\n\n param_dicts = [{\"params\": [p for n, p in model.named_parameters() if p.requires_grad]}]\n optimizer = torch.optim.AdamW(param_dicts, lr=opt.lr, weight_decay=opt.wd)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_drop)\n\n if opt.resume is not None:\n logger.info(f\"Load checkpoint from {opt.resume}\")\n checkpoint = torch.load(opt.resume, map_location=\"cpu\")\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n if 'pt' in opt.resume[:-4]:\n if 'asr' in opt.resume[:25]:\n model.load_state_dict(checkpoint[\"model\"])\n else:\n for k, v in checkpoint[\"model\"].items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n # model.load_state_dict(checkpoint[\"model\"])\n model.load_state_dict(new_state_dict)\n else:\n model.load_state_dict(checkpoint[\"model\"])\n if opt.resume_all:\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n opt.start_epoch = checkpoint['epoch'] + 1\n logger.info(f\"Loaded model saved at epoch {checkpoint['epoch']} from checkpoint: {opt.resume}\")\n else:\n logger.warning(\"If you intend to evaluate the model, please specify --resume with ckpt path\")\n\n return model, criterion, optimizer, lr_scheduler" }, { "identifier": "AverageMeter", "path": "utils/basic_utils.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current/max/min value\"\"\"\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n\n def update(self, val, n=1):\n self.max = max(val, self.max)\n self.min = min(val, self.min)\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count" }, { "identifier": "dict_to_markdown", "path": "utils/basic_utils.py", "snippet": "def dict_to_markdown(d, max_str_len=120):\n # convert list into its str representation\n d = {k: v.__repr__() if isinstance(v, list) else v for k, v in d.items()}\n # truncate string that is longer than max_str_len\n if max_str_len is not None:\n d = {k: v[-max_str_len:] if isinstance(v, str) else v for k, v in d.items()}\n return pd.DataFrame(d, index=[0]).transpose().to_markdown()" }, { "identifier": "count_parameters", "path": "utils/model_utils.py", "snippet": "def count_parameters(model, verbose=True):\n \"\"\"Count number of parameters in PyTorch model,\n References: https://discuss.pytorch.org/t/how-do-i-check-the-number-of-parameters-of-a-model/4325/7.\n\n from utils.utils import count_parameters\n count_parameters(model)\n import sys\n sys.exit(1)\n \"\"\"\n n_all = sum(p.numel() for p in model.parameters())\n n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)\n if verbose:\n print(\"Parameter Count: all {:,d}; trainable {:,d}\".format(n_all, n_trainable))\n return n_all, n_trainable" } ]
import os import time import json import pprint import random import numpy as np import torch import torch.nn as nn import torch.backends.cudnn as cudnn import logging import sys from tqdm import tqdm, trange from collections import defaultdict from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from cg_detr.config import BaseOptions from cg_detr.start_end_dataset import \ StartEndDataset, start_end_collate, prepare_batch_inputs from cg_detr.inference import eval_epoch, start_inference, setup_model from utils.basic_utils import AverageMeter, dict_to_markdown from utils.model_utils import count_parameters
14,894
prev_best_score = 0. es_cnt = 0 # start_epoch = 0 if opt.start_epoch is None: start_epoch = -1 if opt.eval_untrained else 0 else: start_epoch = opt.start_epoch save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name) for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"): if epoch_i > -1: train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer) lr_scheduler.step() eval_epoch_interval = 5 if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0: with torch.no_grad(): metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \ eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer) # log to_write = opt.eval_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]), eval_metrics_str=json.dumps(metrics_no_nms)) with open(opt.eval_log_filepath, "a") as f: f.write(to_write) logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4))) if metrics_nms is not None: logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4))) metrics = metrics_no_nms for k, v in metrics["brief"].items(): tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1) # stop_score = metrics["brief"]["MR-full-mAP"] stop_score = metrics["brief"]["mAP"] if stop_score > prev_best_score: es_cnt = 0 prev_best_score = stop_score checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt")) best_file_paths = [e.replace("latest", "best") for e in latest_file_paths] for src, tgt in zip(latest_file_paths, best_file_paths): os.renames(src, tgt) logger.info("The checkpoint file has been updated.") else: es_cnt += 1 if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop with open(opt.train_log_filepath, "a") as f: f.write(f"Early Stop at epoch {epoch_i}") logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n") break # save ckpt checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt")) save_interval = 10 if "subs_train" in opt.train_path else 50 # smaller for pretrain if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt")) if opt.debug: break tb_writer.close() def start_training(): logger.info("Setup config, data and model...") opt = BaseOptions().parse() set_seed(opt.seed) if opt.debug: # keep the model run deterministically # 'cudnn.benchmark = True' enabled auto finding the best algorithm for a specific input/net config. # Enable this only when input size is fixed. 
cudnn.benchmark = False cudnn.deterministic = True dataset_config = dict( dset_name=opt.dset_name, data_path=opt.train_path, v_feat_dirs=opt.v_feat_dirs, q_feat_dir=opt.t_feat_dir, q_feat_type="last_hidden_state", max_q_l=opt.max_q_l, max_v_l=opt.max_v_l, ctx_mode=opt.ctx_mode, data_ratio=opt.data_ratio, normalize_v=not opt.no_norm_vfeat, normalize_t=not opt.no_norm_tfeat, clip_len=opt.clip_length, max_windows=opt.max_windows, span_loss_type=opt.span_loss_type, txt_drop_ratio=opt.txt_drop_ratio, dset_domain=opt.dset_domain, ) dataset_config["data_path"] = opt.train_path
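The training loop in the cropped code above logs per-metric averages through the AverageMeter helper shown in the context snippets (the v.avg values written to the eval log). A small standalone usage sketch with made-up loss values, to show what the meter tracks:

from utils.basic_utils import AverageMeter  # class shown verbatim in the context snippet above

loss_meter = AverageMeter()
for loss_value in [0.92, 0.71, 0.65]:       # hypothetical per-batch losses
    loss_meter.update(loss_value, n=1)
print(loss_meter.avg, loss_meter.min, loss_meter.max)  # running mean, plus min/max seen so far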
logger = logging.getLogger(__name__) logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) def set_seed(seed, use_cuda=True): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if use_cuda: torch.cuda.manual_seed_all(seed) def train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer): logger.info(f"[Epoch {epoch_i+1}]") model.train() criterion.train() # init meters time_meters = defaultdict(AverageMeter) loss_meters = defaultdict(AverageMeter) num_training_examples = len(train_loader) timer_dataloading = time.time() for batch_idx, batch in tqdm(enumerate(train_loader), desc="Training Iteration", total=num_training_examples): time_meters["dataloading_time"].update(time.time() - timer_dataloading) timer_start = time.time() model_inputs, targets = prepare_batch_inputs(batch[1], opt.device, non_blocking=opt.pin_memory) time_meters["prepare_inputs_time"].update(time.time() - timer_start) timer_start = time.time() outputs = model(**model_inputs, targets=targets) loss_dict = criterion(outputs, targets) weight_dict = criterion.weight_dict losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) time_meters["model_forward_time"].update(time.time() - timer_start) timer_start = time.time() optimizer.zero_grad() losses.backward() if opt.grad_clip > 0: nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip) optimizer.step() time_meters["model_backward_time"].update(time.time() - timer_start) loss_dict["loss_overall"] = float(losses) # for logging only for k, v in loss_dict.items(): loss_meters[k].update(float(v) * weight_dict[k] if k in weight_dict else float(v)) timer_dataloading = time.time() if opt.debug and batch_idx == 3: break # print/add logs tb_writer.add_scalar("Train/lr", float(optimizer.param_groups[0]["lr"]), epoch_i+1) for k, v in loss_meters.items(): tb_writer.add_scalar("Train/{}".format(k), v.avg, epoch_i+1) to_write = opt.train_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i+1, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in loss_meters.items()])) with open(opt.train_log_filepath, "a") as f: f.write(to_write) logger.info("Epoch time stats:") for name, meter in time_meters.items(): d = {k: f"{getattr(meter, k):.4f}" for k in ["max", "min", "avg"]} logger.info(f"{name} ==> {d}") def train(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt): if opt.device.type == "cuda": logger.info("CUDA enabled.") model.to(opt.device) tb_writer = SummaryWriter(opt.tensorboard_log_dir) tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None)) opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n" opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n" train_loader = DataLoader( train_dataset, collate_fn=start_end_collate, batch_size=opt.bsz, num_workers=opt.num_workers, shuffle=True, pin_memory=opt.pin_memory ) prev_best_score = 0. 
es_cnt = 0 # start_epoch = 0 if opt.start_epoch is None: start_epoch = -1 if opt.eval_untrained else 0 else: start_epoch = opt.start_epoch save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name) for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"): if epoch_i > -1: train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer) lr_scheduler.step() eval_epoch_interval = opt.eval_epoch if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0: with torch.no_grad(): metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \ eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer) # log to_write = opt.eval_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]), eval_metrics_str=json.dumps(metrics_no_nms)) with open(opt.eval_log_filepath, "a") as f: f.write(to_write) logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4))) if metrics_nms is not None: logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4))) metrics = metrics_no_nms for k, v in metrics["brief"].items(): tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1) if opt.dset_name in ['hl']: stop_score = metrics["brief"]["MR-full-mAP"] else: stop_score = (metrics["brief"]["[email protected]"] + metrics["brief"]["[email protected]"]) / 2 if stop_score > prev_best_score: es_cnt = 0 prev_best_score = stop_score checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt")) best_file_paths = [e.replace("latest", "best") for e in latest_file_paths] for src, tgt in zip(latest_file_paths, best_file_paths): os.renames(src, tgt) logger.info("The checkpoint file has been updated.") else: es_cnt += 1 if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop with open(opt.train_log_filepath, "a") as f: f.write(f"Early Stop at epoch {epoch_i}") logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n") break # save ckpt checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt")) # save_interval = 10 if "subs_train" in opt.train_path else 50 # smaller for pretrain # if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies # checkpoint = { # "model": model.state_dict(), # "optimizer": optimizer.state_dict(), # "epoch": epoch_i, # "opt": opt # } # torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt")) if opt.debug: break tb_writer.close() def train_hl(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt): if opt.device.type == "cuda": logger.info("CUDA enabled.") model.to(opt.device) tb_writer = SummaryWriter(opt.tensorboard_log_dir) tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None)) opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n" opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n" train_loader = DataLoader( train_dataset, collate_fn=start_end_collate, batch_size=opt.bsz, 
num_workers=opt.num_workers, shuffle=True, pin_memory=opt.pin_memory ) prev_best_score = 0. es_cnt = 0 # start_epoch = 0 if opt.start_epoch is None: start_epoch = -1 if opt.eval_untrained else 0 else: start_epoch = opt.start_epoch save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name) for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"): if epoch_i > -1: train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer) lr_scheduler.step() eval_epoch_interval = 5 if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0: with torch.no_grad(): metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \ eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer) # log to_write = opt.eval_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]), eval_metrics_str=json.dumps(metrics_no_nms)) with open(opt.eval_log_filepath, "a") as f: f.write(to_write) logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4))) if metrics_nms is not None: logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4))) metrics = metrics_no_nms for k, v in metrics["brief"].items(): tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1) # stop_score = metrics["brief"]["MR-full-mAP"] stop_score = metrics["brief"]["mAP"] if stop_score > prev_best_score: es_cnt = 0 prev_best_score = stop_score checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt")) best_file_paths = [e.replace("latest", "best") for e in latest_file_paths] for src, tgt in zip(latest_file_paths, best_file_paths): os.renames(src, tgt) logger.info("The checkpoint file has been updated.") else: es_cnt += 1 if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop with open(opt.train_log_filepath, "a") as f: f.write(f"Early Stop at epoch {epoch_i}") logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n") break # save ckpt checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt")) save_interval = 10 if "subs_train" in opt.train_path else 50 # smaller for pretrain if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt")) if opt.debug: break tb_writer.close() def start_training(): logger.info("Setup config, data and model...") opt = BaseOptions().parse() set_seed(opt.seed) if opt.debug: # keep the model run deterministically # 'cudnn.benchmark = True' enabled auto finding the best algorithm for a specific input/net config. # Enable this only when input size is fixed. 
cudnn.benchmark = False cudnn.deterministic = True dataset_config = dict( dset_name=opt.dset_name, data_path=opt.train_path, v_feat_dirs=opt.v_feat_dirs, q_feat_dir=opt.t_feat_dir, q_feat_type="last_hidden_state", max_q_l=opt.max_q_l, max_v_l=opt.max_v_l, ctx_mode=opt.ctx_mode, data_ratio=opt.data_ratio, normalize_v=not opt.no_norm_vfeat, normalize_t=not opt.no_norm_tfeat, clip_len=opt.clip_length, max_windows=opt.max_windows, span_loss_type=opt.span_loss_type, txt_drop_ratio=opt.txt_drop_ratio, dset_domain=opt.dset_domain, ) dataset_config["data_path"] = opt.train_path
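The loops above save checkpoints containing the model, optimizer, scheduler states and the epoch index under "_best.ckpt" / "_latest.ckpt" suffixes, and the setup_model snippet in the context restores them when --resume is given. Below is a hedged sketch of reloading such a checkpoint by hand; the path is hypothetical and the key layout mirrors what train() saves, not a verified API.

import torch

def resume_from_checkpoint(model, optimizer, lr_scheduler, ckpt_path):
    # mirrors the resume logic in setup_model(); dict keys match the checkpoint dicts saved in train()
    ckpt = torch.load(ckpt_path, map_location="cpu")
    model.load_state_dict(ckpt["model"])
    optimizer.load_state_dict(ckpt["optimizer"])
    lr_scheduler.load_state_dict(ckpt["lr_scheduler"])
    return ckpt["epoch"] + 1  # epoch to resume training from

# start_epoch = resume_from_checkpoint(model, optimizer, lr_scheduler,
#                                      "results/my_run/model_best.ckpt")  # hypothetical results dir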
train_dataset = StartEndDataset(**dataset_config)
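The gold next line above instantiates the dataset by unpacking the dataset_config dict assembled at the end of the cropped code into keyword arguments. A toy illustration of that **-unpacking pattern, using a hypothetical class and keys rather than the real StartEndDataset signature:

class ToyDataset:
    def __init__(self, dset_name, data_path, clip_len):
        self.dset_name, self.data_path, self.clip_len = dset_name, data_path, clip_len

config = dict(dset_name="hl", data_path="train.jsonl", clip_len=2)  # made-up values
dataset = ToyDataset(**config)  # same as ToyDataset(dset_name="hl", data_path="train.jsonl", clip_len=2)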
1
2023-11-10 12:45:25+00:00
24k
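Closing the first record: its import block also pulls in the count_parameters utility shown in the context snippets, which reports total vs. trainable parameters of any torch module. A quick self-contained sketch with a throwaway linear layer (not one of the repository's models):

import torch.nn as nn
from utils.model_utils import count_parameters  # helper shown in the context snippets above

toy = nn.Linear(10, 4)                        # 10*4 weights + 4 biases = 44 parameters
n_all, n_trainable = count_parameters(toy)    # prints "Parameter Count: all 44; trainable 44"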
ej0cl6/TextEE
TextEE/models/AMRIE/E2Etrainer.py
[ { "identifier": "BasicTrainer", "path": "TextEE/models/trainer.py", "snippet": "class BasicTrainer(object):\n def __init__(self, config, type_set=None):\n self.config = config\n self.type_set = type_set\n \n @classmethod\n def add_extra_info_fn(cls, instances, raw_data, config):\n for instance in instances:\n instance[\"extra_info\"] = None\n return instances\n \n def load_model(self, checkpoint=None):\n pass\n \n def train(self, train_data, dev_data, **kwargs):\n pass\n \n def predict(self, data, **kwargs):\n pass" }, { "identifier": "AMRIEE2EModel", "path": "TextEE/models/AMRIE/E2Emodel.py", "snippet": "class AMRIEE2EModel(nn.Module):\n def __init__(self,\n config,\n vocabs,\n valid_patterns=None):\n super().__init__()\n\n self.if_local = 0\n # vocabularies\n self.vocabs = vocabs\n self.entity_label_stoi = vocabs['entity_label']\n self.trigger_label_stoi = vocabs['trigger_label']\n self.mention_type_stoi = vocabs['mention_type']\n self.entity_type_stoi = vocabs['entity_type']\n self.event_type_stoi = vocabs['event_type']\n self.relation_type_stoi = vocabs['relation_type']\n self.role_type_stoi = vocabs['role_type']\n self.entity_label_itos = {i:s for s, i in self.entity_label_stoi.items()}\n self.trigger_label_itos = {i:s for s, i in self.trigger_label_stoi.items()}\n self.entity_type_itos = {i: s for s, i in self.entity_type_stoi.items()}\n self.event_type_itos = {i: s for s, i in self.event_type_stoi.items()}\n self.relation_type_itos = {i: s for s, i in self.relation_type_stoi.items()}\n self.role_type_itos = {i: s for s, i in self.role_type_stoi.items()}\n self.entity_label_num = len(self.entity_label_stoi)\n self.trigger_label_num = len(self.trigger_label_stoi)\n self.mention_type_num = len(self.mention_type_stoi)\n self.entity_type_num = len(self.entity_type_stoi)\n self.event_type_num = len(self.event_type_stoi)\n self.relation_type_num = len(self.relation_type_stoi)\n self.role_type_num = len(self.role_type_stoi)\n self.valid_relation_entity = set()\n self.valid_event_role = set()\n self.valid_role_entity = set()\n if valid_patterns:\n self.valid_event_role = valid_patterns['event_role']\n self.valid_relation_entity = valid_patterns['relation_entity']\n self.valid_role_entity = valid_patterns['role_entity']\n self.relation_directional = config.relation_directional\n self.symmetric_relations = config.symmetric_relations\n self.symmetric_relation_idxs = {self.relation_type_stoi[r]\n for r in self.symmetric_relations}\n\n # BERT encoder\n self.pretrained_model_name = config.pretrained_model_name\n self.cache_dir = config.cache_dir\n if self.pretrained_model_name.startswith('bert-'):\n self.bert = BertModel.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir,\n output_hidden_states=True)\n self.bert_config = BertConfig.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir)\n elif self.pretrained_model_name.startswith('roberta-'):\n self.bert = RobertaModel.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir,\n output_hidden_states=True)\n self.bert_config = RobertaConfig.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir)\n elif self.pretrained_model_name.startswith('xlm-'):\n self.bert = XLMRobertaModel.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir,\n output_hidden_states=True)\n self.bert_config = XLMRobertaConfig.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir) \n else:\n raise ValueError\n self.bert_dim = self.bert_config.hidden_size\n self.extra_bert = 
config.extra_bert\n self.use_extra_bert = config.use_extra_bert\n if self.use_extra_bert:\n self.bert_dim *= 2\n # print(self.use_extra_bert)\n # print(bert_config)\n # self.bert = BertModel(bert_config)\n self.bert_dropout = nn.Dropout(p=config.bert_dropout)\n self.multi_piece = config.multi_piece_strategy\n # local classifiers\n self.use_entity_type = config.use_entity_type\n self.binary_dim = self.bert_dim * 2\n linear_bias = config.linear_bias\n linear_dropout = config.linear_dropout\n entity_hidden_num = config.entity_hidden_num\n mention_hidden_num = config.mention_hidden_num\n event_hidden_num = config.event_hidden_num\n relation_hidden_num = config.relation_hidden_num\n role_hidden_num = config.role_hidden_num\n self.edge_type_num = config.edge_type_num\n self.edge_type_dim = config.edge_type_dim\n self.use_graph_encoder = config.use_graph_encoder\n gnn_layers = config.gnn_layers\n self.lamda = config.lamda\n role_input_dim = self.binary_dim + (self.entity_type_num if self.use_entity_type else 0)\n self.device = config.gpu_device\n # print(self.bert_dim)\n if self.use_graph_encoder:\n if not self.if_local:\n self.graph_encoder = FinalGNN(self.bert_dim, self.edge_type_dim, self.edge_type_num, gnn_layers, self.lamda, config.gpu_device)\n else:\n self.graph_encoder = FinalGNN(self.bert_dim, self.edge_type_dim, self.edge_type_num, gnn_layers, self.lamda, 'cpu')\n self.entity_label_ffn = nn.Linear(self.bert_dim, self.entity_label_num,\n bias=linear_bias)\n self.trigger_label_ffn = nn.Linear(self.bert_dim, self.trigger_label_num,\n bias=linear_bias)\n self.entity_type_ffn = Linears([self.bert_dim, entity_hidden_num,\n self.entity_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n self.mention_type_ffn = Linears([self.bert_dim, mention_hidden_num,\n self.mention_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n self.event_type_ffn = Linears([self.bert_dim, event_hidden_num,\n self.event_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n self.relation_type_ffn = Linears([self.binary_dim, relation_hidden_num,\n self.relation_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n self.role_type_ffn = Linears([role_input_dim, role_hidden_num,\n self.role_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n # global features\n self.use_global_features = config.use_global_features\n self.global_features = config.global_features\n # print(self.global_features)\n self.global_feature_maps = generate_global_feature_maps(vocabs, valid_patterns)\n self.global_feature_num = sum(len(m) for k, m in self.global_feature_maps.items()\n if k in self.global_features or\n not self.global_features)\n print(\"number of global features:\", self.global_feature_num)\n # print(\"global_features:\", self.global_feature_maps)\n self.global_feature_weights = nn.Parameter(\n torch.zeros(self.global_feature_num).fill_(-0.0001))\n # decoder\n self.beam_size = config.beam_size\n self.beta_v = config.beta_v\n self.beta_e = config.beta_e\n # loss functions\n self.entity_criteria = torch.nn.CrossEntropyLoss()\n self.event_criteria = torch.nn.CrossEntropyLoss()\n self.mention_criteria = torch.nn.CrossEntropyLoss()\n self.relation_criteria = torch.nn.CrossEntropyLoss()\n self.role_criteria = torch.nn.CrossEntropyLoss()\n # others\n self.entity_crf = CRF(self.entity_label_stoi, 
bioes=False)\n self.trigger_crf = CRF(self.trigger_label_stoi, bioes=False)\n self.pad_vector = nn.Parameter(torch.randn(1, 1, self.bert_dim))\n\n def encode(self, piece_idxs, attention_masks, token_lens, amr_graphs):\n \"\"\"Encode input sequences with BERT\n :param piece_idxs (LongTensor): word pieces indices\n :param attention_masks (FloatTensor): attention mask\n :param token_lens (list): token lengths\n :param amr_graphs (list): list of dgl amr graphs\n \"\"\"\n batch_size, _ = piece_idxs.size()\n all_bert_outputs = self.bert(piece_idxs, attention_mask=attention_masks)\n bert_outputs = all_bert_outputs[0]\n # print('\\n')\n # print(\"bert_init_dim\", bert_outputs.shape)\n\n if self.use_extra_bert:\n extra_bert_outputs = all_bert_outputs[2][self.extra_bert]\n bert_outputs = torch.cat([bert_outputs, extra_bert_outputs], dim=2)\n\n if self.multi_piece == 'first':\n # select the first piece for multi-piece words\n offsets = token_lens_to_offsets(token_lens)\n offsets = piece_idxs.new(offsets)\n # + 1 because the first vector is for [CLS]\n offsets = offsets.unsqueeze(-1).expand(batch_size, -1, self.bert_dim) + 1\n bert_outputs = torch.gather(bert_outputs, 1, offsets)\n elif self.multi_piece == 'average':\n # average all pieces for multi-piece words\n idxs, masks, token_num, token_len = token_lens_to_idxs(token_lens)\n idxs = piece_idxs.new(idxs).unsqueeze(-1).expand(batch_size, -1, self.bert_dim) + 1\n masks = bert_outputs.new(masks).unsqueeze(-1)\n bert_outputs = torch.gather(bert_outputs, 1, idxs) * masks\n bert_outputs = bert_outputs.view(batch_size, token_num, token_len, self.bert_dim)\n bert_outputs = bert_outputs.sum(2)\n else:\n raise ValueError('Unknown multi-piece token handling strategy: {}'\n .format(self.multi_piece))\n bert_outputs = self.bert_dropout(bert_outputs)\n return bert_outputs\n\n def scores(self, bert_outputs, graphs, amrs, aligns, exists, epoch, entity_types_onehot=None,\n predict=False, gold_tri=False, gold_ent=False):\n (\n entity_idxs, entity_masks, entity_num, entity_len,\n trigger_idxs, trigger_masks, trigger_num, trigger_len,\n ) = graphs_to_node_idxs(graphs)\n\n batch_size, _, bert_dim = bert_outputs.size()\n\n entity_idxs = bert_outputs.new_tensor(entity_idxs, dtype=torch.long)\n trigger_idxs = bert_outputs.new_tensor(trigger_idxs, dtype=torch.long)\n entity_masks = bert_outputs.new_tensor(entity_masks)\n trigger_masks = bert_outputs.new_tensor(trigger_masks)\n\n # entity type scores\n entity_idxs = entity_idxs.unsqueeze(-1).expand(-1, -1, bert_dim)\n entity_masks = entity_masks.unsqueeze(-1).expand(-1, -1, bert_dim)\n entity_words = torch.gather(bert_outputs, 1, entity_idxs)\n entity_words = entity_words * entity_masks\n entity_words = entity_words.view(batch_size, entity_num, entity_len, bert_dim)\n entity_reprs = entity_words.sum(2)\n entity_type_scores = self.entity_type_ffn(entity_reprs)\n\n # mention type scores\n mention_type_scores = self.mention_type_ffn(entity_reprs)\n\n # trigger type scores\n trigger_idxs = trigger_idxs.unsqueeze(-1).expand(-1, -1, bert_dim)\n trigger_masks = trigger_masks.unsqueeze(-1).expand(-1, -1, bert_dim)\n trigger_words = torch.gather(bert_outputs, 1, trigger_idxs)\n trigger_words = trigger_words * trigger_masks\n trigger_words = trigger_words.view(batch_size, trigger_num, trigger_len, bert_dim)\n trigger_reprs = trigger_words.sum(2)\n event_type_scores = self.event_type_ffn(trigger_reprs)\n \n # Add for gold entity given case:\n # The idea is to make the gold entities' score become very high\n if gold_ent:\n for i, 
graph in enumerate(graphs):\n for j, ent in enumerate(graph.entities):\n entity_type_scores[i][j][ent[2]] = 10000\n \n # Add for gold trigger given case:\n # The idea is to make the gold triggers' score become very high\n if gold_tri:\n for i, graph in enumerate(graphs):\n for j, trig in enumerate(graph.triggers):\n event_type_scores[i][j][trig[2]] = 10000\n\n # must deal with them individually within a batch\n batch_size = len(amrs)\n\n if self.use_graph_encoder:\n new_trigger_reprs = bert_outputs.new_zeros(trigger_reprs.shape)\n new_entity_reprs = bert_outputs.new_zeros(entity_reprs.shape)\n\n for i in range(batch_size):\n graph_i = graphs[i]\n bert_i = bert_outputs[i]\n amr_i = amrs[i].clone()\n align_i = aligns[i]\n exist_i = exists[i]\n\n if amr_i.num_nodes() == 0:\n continue\n # entity and trigger representations\n ent_repr_i = entity_reprs[i]\n trig_repr_i = trigger_reprs[i]\n\n span_list = amr_i.ndata[\"token_span\"].tolist()\n\n amr_span_reprs = generate_span_reprs(span_list, bert_i)\n new_amr_reprs = amr_span_reprs.new_zeros(amr_span_reprs.shape)\n # amr_span_reprs: (num_span, bert_dim)\n ent_list = graph_i.entities\n trig_list = graph_i.triggers\n\n ent_to_node_idx = []\n trig_to_node_idx = []\n\n if_amr_nodes_selected = [0 for _ in range(len(span_list))]\n ent_lines = []\n trig_lines = []\n \n # update span_list and span_reprs for gnn encoding\n for j, event in enumerate(trig_list):\n # print(j)\n head_event_idx = event[1] - 1\n align_node_idx = align_i[head_event_idx]\n\n if exist_i[head_event_idx] == 1 and if_amr_nodes_selected[align_node_idx] == 0:\n if_amr_nodes_selected[align_node_idx] = 1\n new_amr_reprs[align_node_idx] = trig_repr_i[j]\n trig_lines.append(align_node_idx)\n else:\n\n amr_i.add_nodes(1)\n curr_node_num = amr_i.num_nodes()\n new_amr_reprs = torch.cat((new_amr_reprs, trig_repr_i[j:j+1]), 0)\n trig_lines.append(curr_node_num - 1)\n if_amr_nodes_selected.append(1)\n amr_i.add_edge(align_node_idx, curr_node_num - 1)\n amr_i.edata['type'][-1][0] = self.edge_type_num - 1\n\n for j, entity in enumerate(ent_list):\n head_entity_idx = entity[1] - 1\n align_node_idx = align_i[head_entity_idx]\n if exist_i[head_entity_idx] == 1 and if_amr_nodes_selected[align_node_idx] == 0:\n if_amr_nodes_selected[align_node_idx] = 1\n new_amr_reprs[align_node_idx] = ent_repr_i[j]\n ent_lines.append(align_node_idx)\n else:\n amr_i.add_nodes(1)\n curr_node_num = amr_i.num_nodes()\n new_amr_reprs = torch.cat((new_amr_reprs, ent_repr_i[j:j+1]), 0)\n ent_lines.append(curr_node_num - 1)\n if_amr_nodes_selected.append(1)\n amr_i.add_edge(align_node_idx, curr_node_num - 1)\n amr_i.edata['type'][-1][0] = self.edge_type_num - 1\n # the last edge type is new relation\n # fill in the blanks of other NODES\n for j in range(len(if_amr_nodes_selected)):\n if if_amr_nodes_selected[j] == 0:\n new_amr_reprs[j] = amr_span_reprs[j]\n \n assert (new_amr_reprs.shape[0] == amr_i.num_nodes())\n # now we have graph_i and new_amr_reprs\n check_amr_t = new_amr_reprs[bert_outputs.new_tensor(trig_lines, dtype=torch.long)]\n check_amr_e = new_amr_reprs[bert_outputs.new_tensor(ent_lines, dtype=torch.long)]\n output_embs = self.graph_encoder(amr_i, new_amr_reprs)\n\n tensor_trig_lines = bert_outputs.new_tensor(trig_lines, dtype=torch.long)\n tensor_ent_lines = bert_outputs.new_tensor(ent_lines, dtype=torch.long)\n\n new_trig_embs = output_embs[tensor_trig_lines]\n new_ent_embs = output_embs[tensor_ent_lines]\n\n new_trigger_reprs[i, 0:len(trig_lines), :] = new_trig_embs\n new_entity_reprs[i, 0:len(ent_lines), :] = 
new_ent_embs\n else:\n new_trigger_reprs = trigger_reprs\n new_entity_reprs = entity_reprs\n \n # relation type score\n ee_idxs = generate_pairwise_idxs(entity_num, entity_num)\n ee_idxs = entity_idxs.new(ee_idxs)\n ee_idxs = ee_idxs.unsqueeze(0).unsqueeze(-1).expand(batch_size, -1, bert_dim)\n ee_reprs = torch.cat([new_entity_reprs, new_entity_reprs], dim=1)\n ee_reprs = torch.gather(ee_reprs, 1, ee_idxs)\n ee_reprs = ee_reprs.view(batch_size, -1, 2 * bert_dim)\n relation_type_scores = self.relation_type_ffn(ee_reprs)\n # role type score\n te_idxs = generate_pairwise_idxs(trigger_num, entity_num)\n te_idxs = entity_idxs.new(te_idxs)\n te_idxs = te_idxs.unsqueeze(0).unsqueeze(-1).expand(batch_size, -1, bert_dim)\n te_reprs = torch.cat([new_trigger_reprs, new_entity_reprs], dim=1)\n te_reprs = torch.gather(te_reprs, 1, te_idxs)\n te_reprs = te_reprs.view(batch_size, -1, 2 * bert_dim)\n\n if self.use_entity_type:\n if predict:\n entity_type_scores_softmax = entity_type_scores.softmax(dim=2)\n entity_type_scores_softmax = entity_type_scores_softmax.repeat(1, trigger_num, 1)\n te_reprs = torch.cat([te_reprs, entity_type_scores_softmax], dim=2)\n else:\n entity_types_onehot = entity_types_onehot.repeat(1, trigger_num, 1)\n te_reprs = torch.cat([te_reprs, entity_types_onehot], dim=2)\n role_type_scores = self.role_type_ffn(te_reprs)\n\n return (entity_type_scores, mention_type_scores, event_type_scores,\n relation_type_scores, role_type_scores)\n\n def forward(self, batch, epoch):\n # encoding\n bert_outputs = self.encode(batch.piece_idxs,\n batch.attention_masks,\n batch.token_lens,\n batch.amr)\n \n batch_size, _, _ = bert_outputs.size()\n entity_types = batch.entity_type_idxs.view(batch_size, -1)\n entity_types = torch.clamp(entity_types, min=0)\n entity_types_onehot = bert_outputs.new_zeros(*entity_types.size(),\n self.entity_type_num)\n entity_types_onehot.scatter_(2, entity_types.unsqueeze(-1), 1)\n\n entity_label_scores = self.entity_label_ffn(bert_outputs)\n trigger_label_scores = self.trigger_label_ffn(bert_outputs)\n \n entity_label_scores = self.entity_crf.pad_logits(entity_label_scores)\n \n entity_label_loglik = self.entity_crf.loglik(entity_label_scores,\n batch.entity_label_idxs,\n batch.token_nums)\n \n trigger_label_scores = self.trigger_crf.pad_logits(trigger_label_scores)\n trigger_label_loglik = self.trigger_crf.loglik(trigger_label_scores,\n batch.trigger_label_idxs,\n batch.token_nums)\n\n scores = self.scores(bert_outputs, batch.graphs, batch.amr, batch.align, batch.exist, epoch, entity_types_onehot)\n (\n entity_type_scores, mention_type_scores, event_type_scores,\n relation_type_scores, role_type_scores\n ) = scores\n entity_type_scores = entity_type_scores.view(-1, self.entity_type_num)\n event_type_scores = event_type_scores.view(-1, self.event_type_num)\n relation_type_scores = relation_type_scores.view(-1, self.relation_type_num)\n role_type_scores = role_type_scores.view(-1, self.role_type_num)\n mention_type_scores = mention_type_scores.view(-1, self.mention_type_num)\n\n classification_loss = self.entity_criteria(entity_type_scores,\n batch.entity_type_idxs) + \\\n self.event_criteria(event_type_scores,\n batch.event_type_idxs) + \\\n self.relation_criteria(relation_type_scores,\n batch.relation_type_idxs) + \\\n self.role_criteria(role_type_scores,\n batch.role_type_idxs)\n\n loss = classification_loss - entity_label_loglik.mean() - trigger_label_loglik.mean()\n\n # global features\n if self.use_global_features:\n gold_scores = 
self.compute_graph_scores(batch.graphs, scores)\n top_graphs = self.generate_locally_top_graphs(batch.graphs, scores)\n top_scores = self.compute_graph_scores(top_graphs, scores)\n global_loss = (top_scores - gold_scores).clamp(min=0)\n loss = loss + global_loss.mean()\n return loss\n\n def predict(self, batch, epoch, gold_tri=False, gold_ent=False):\n self.eval()\n\n bert_outputs = self.encode(batch.piece_idxs,\n batch.attention_masks,\n batch.token_lens,\n batch.amr)\n batch_size, _, _ = bert_outputs.size()\n\n # identification\n entity_label_scores = self.entity_label_ffn(bert_outputs)\n entity_label_scores = self.entity_crf.pad_logits(entity_label_scores)\n trigger_label_scores = self.trigger_label_ffn(bert_outputs)\n trigger_label_scores = self.trigger_crf.pad_logits(trigger_label_scores)\n _, entity_label_preds = self.entity_crf.viterbi_decode(entity_label_scores,\n batch.token_nums)\n _, trigger_label_preds = self.trigger_crf.viterbi_decode(trigger_label_scores,\n batch.token_nums)\n \n \n # Add for gold trigger/ gold entity given case.\n if gold_ent == True:\n entities = [[list(entity) for entity in graph.entities] for graph in batch.graphs]\n else:\n entities = tag_paths_to_spans(entity_label_preds,\n batch.token_nums,\n self.entity_label_stoi)\n \n if gold_tri == True:\n triggers = [[list(trigger) for trigger in graph.triggers] for graph in batch.graphs]\n else:\n triggers = tag_paths_to_spans(trigger_label_preds,\n batch.token_nums,\n self.trigger_label_stoi)\n \n node_graphs = [Graph(e, t, [], [], self.vocabs)\n for e, t in zip(entities, triggers)]\n scores = self.scores(bert_outputs, node_graphs, batch.amr, batch.align, batch.exist, epoch, predict=True,\n gold_tri=gold_tri, gold_ent=gold_ent)\n max_entity_num = max(max(len(seq_entities) for seq_entities in entities), 1)\n\n batch_graphs = []\n # Decode each sentence in the batch\n for i in range(batch_size):\n seq_entities, seq_triggers = entities[i], triggers[i]\n amr_i = batch.amr[i]\n spans = sorted([(*i, True) for i in seq_entities] + [(*i, False) for i in seq_triggers], key=lambda x: (x[0], x[1], not x[-1]))\n entity_num, trigger_num = len(seq_entities), len(seq_triggers)\n if entity_num == 0 and trigger_num == 0:\n # skip decoding\n batch_graphs.append(Graph.empty_graph(self.vocabs))\n continue\n graph = self.decode(spans, amr_i,\n entity_type_scores=scores[0][i],\n mention_type_scores=scores[1][i],\n event_type_scores=scores[2][i],\n relation_type_scores=scores[3][i],\n role_type_scores=scores[4][i],\n entity_num=max_entity_num)\n batch_graphs.append(graph)\n\n self.train()\n return batch_graphs\n\n def compute_graph_scores(self, graphs, scores):\n (\n entity_type_scores, _mention_type_scores,\n trigger_type_scores, relation_type_scores,\n role_type_scores\n ) = scores\n label_idxs = graphs_to_label_idxs(graphs)\n label_idxs = [entity_type_scores.new_tensor(idx,\n dtype=torch.long if i % 2 == 0\n else torch.float)\n for i, idx in enumerate(label_idxs)]\n (\n entity_idxs, entity_mask, trigger_idxs, trigger_mask,\n relation_idxs, relation_mask, role_idxs, role_mask\n ) = label_idxs\n # Entity score\n entity_idxs = entity_idxs.unsqueeze(-1)\n entity_scores = torch.gather(entity_type_scores, 2, entity_idxs)\n entity_scores = entity_scores.squeeze(-1) * entity_mask\n entity_score = entity_scores.sum(1)\n # Trigger score\n trigger_idxs = trigger_idxs.unsqueeze(-1)\n trigger_scores = torch.gather(trigger_type_scores, 2, trigger_idxs)\n trigger_scores = trigger_scores.squeeze(-1) * trigger_mask\n trigger_score = 
trigger_scores.sum(1)\n # Relation score\n relation_idxs = relation_idxs.unsqueeze(-1)\n relation_scores = torch.gather(relation_type_scores, 2, relation_idxs)\n relation_scores = relation_scores.squeeze(-1) * relation_mask\n relation_score = relation_scores.sum(1)\n # Role score\n role_idxs = role_idxs.unsqueeze(-1)\n role_scores = torch.gather(role_type_scores, 2, role_idxs)\n role_scores = role_scores.squeeze(-1) * role_mask\n role_score = role_scores.sum(1)\n\n score = entity_score + trigger_score + role_score + relation_score\n\n global_vectors = [generate_global_feature_vector(g, self.global_feature_maps, features=self.global_features)\n for g in graphs]\n global_vectors = entity_scores.new_tensor(global_vectors)\n global_weights = self.global_feature_weights.unsqueeze(0).expand_as(global_vectors)\n global_score = (global_vectors * global_weights).sum(1)\n score = score + global_score\n\n return score\n\n def generate_locally_top_graphs(self, graphs, scores):\n (\n entity_type_scores, _mention_type_scores,\n trigger_type_scores, relation_type_scores,\n role_type_scores\n ) = scores\n max_entity_num = max(max([g.entity_num for g in graphs]), 1)\n top_graphs = []\n for graph_idx, graph in enumerate(graphs):\n entity_num = graph.entity_num\n trigger_num = graph.trigger_num\n _, top_entities = entity_type_scores[graph_idx].max(1)\n top_entities = top_entities.tolist()[:entity_num]\n top_entities = [(i, j, k) for (i, j, _), k in\n zip(graph.entities, top_entities)]\n _, top_triggers = trigger_type_scores[graph_idx].max(1)\n top_triggers = top_triggers.tolist()[:trigger_num]\n top_triggers = [(i, j, k) for (i, j, _), k in\n zip(graph.triggers, top_triggers)]\n # _, top_relations = relation_type_scores[graph_idx].max(1)\n # top_relations = top_relations.tolist()\n # top_relations = [(i, j, top_relations[i * max_entity_num + j])\n # for i in range(entity_num) for j in\n # range(entity_num)\n # if i < j and top_relations[i * max_entity_num + j] != 'O']\n top_relation_scores, top_relation_labels = relation_type_scores[graph_idx].max(1)\n top_relation_scores = top_relation_scores.tolist()\n top_relation_labels = top_relation_labels.tolist()\n top_relations = [(i, j) for i, j in zip(top_relation_scores, top_relation_labels)]\n top_relation_list = []\n for i in range(entity_num):\n for j in range(entity_num):\n if i < j:\n score_1, label_1 = top_relations[i * max_entity_num + j]\n score_2, label_2 = top_relations[j * max_entity_num + i]\n if score_1 > score_2 and label_1 != 'O':\n top_relation_list.append((i, j, label_1))\n if score_2 > score_1 and label_2 != 'O': \n top_relation_list.append((j, i, label_2))\n\n _, top_roles = role_type_scores[graph_idx].max(1)\n top_roles = top_roles.tolist()\n top_roles = [(i, j, top_roles[i * max_entity_num + j])\n for i in range(trigger_num) for j in range(entity_num)\n if top_roles[i * max_entity_num + j] != 'O']\n top_graphs.append(Graph(\n entities=top_entities,\n triggers=top_triggers,\n # relations=top_relations,\n relations=top_relation_list,\n roles=top_roles,\n vocabs=graph.vocabs\n ))\n return top_graphs\n\n def trim_beam_set(self, beam_set, beam_size):\n if len(beam_set) > beam_size:\n beam_set.sort(key=lambda x: self.compute_graph_score(x), reverse=True)\n beam_set = beam_set[:beam_size]\n return beam_set\n\n def compute_graph_score(self, graph):\n score = graph.graph_local_score\n if self.use_global_features:\n global_vector = generate_global_feature_vector(graph,\n self.global_feature_maps,\n features=self.global_features)\n global_vector = 
self.global_feature_weights.new_tensor(global_vector)\n global_score = global_vector.dot(self.global_feature_weights).item()\n score = score + global_score\n return score\n\n def decode(self, spans, amr, entity_type_scores, mention_type_scores, event_type_scores,\n relation_type_scores, role_type_scores, entity_num):\n\n\n prior_list = amr.ndata[\"priority\"].squeeze(1).tolist()\n token_pos_list = amr.ndata[\"token_pos\"].squeeze(1).tolist()\n\n split_order_list = []\n ent_num, trig_num = 0, 0\n for i in range(len(spans)):\n if spans[i][3]:\n split_order_list.append(ent_num)\n ent_num += 1\n else:\n split_order_list.append(trig_num)\n trig_num += 1\n # print(\"split\", split_order_list)\n align_list = [len(prior_list)+10 for _ in range(len(spans))]\n for i in range(len(spans)):\n start_i = spans[i][0]\n end_i = spans[i][1] - 1\n for j in range(len(token_pos_list)):\n if token_pos_list[j] >= start_i and token_pos_list[j] <= end_i:\n align_list[i] = prior_list[j]\n break\n # after we get an alignment list, we try to get the priority and sort the spans\n align_list = np.array(align_list)\n index_list = np.argsort(align_list)\n new_spans = []\n for i in range(len(spans)):\n new_spans.append(spans[index_list[i]])\n\n new_ent_score = entity_type_scores.new_zeros(entity_type_scores.size())\n new_trig_score = event_type_scores.new_zeros(event_type_scores.size())\n new_rela_score = relation_type_scores.new_zeros(relation_type_scores.size())\n new_role_score = role_type_scores.new_zeros(role_type_scores.size())\n\n # build up an individual entity and trigger list\n entity_index_list, trig_index_list = [], []\n\n for i in range(len(new_spans)):\n if new_spans[i][3]:\n entity_index_list.append(split_order_list[index_list[i]])\n else:\n trig_index_list.append(split_order_list[index_list[i]])\n\n # change the order of ent and trig score\n for i in range(ent_num):\n new_ent_score[i] = entity_type_scores[entity_index_list[i]]\n for i in range(trig_num):\n new_trig_score[i] = event_type_scores[trig_index_list[i]]\n\n # change the order of relation matrix\n for i in range(ent_num):\n for j in range(ent_num):\n new_rela_score[i * entity_num + j] = relation_type_scores[entity_index_list[i] * entity_num + entity_index_list[j]]\n\n # cahnge the order of role matrix\n for i in range(trig_num):\n for j in range(ent_num):\n new_role_score[i * entity_num + j] = role_type_scores[trig_index_list[i] * entity_num + entity_index_list[j]]\n\n\n beam_set = [Graph.empty_graph(self.vocabs)]\n entity_idx, trigger_idx = 0, 0\n\n for start, end, _, is_entity_node in new_spans:\n # 1. node step\n if is_entity_node:\n node_scores = new_ent_score[entity_idx].tolist()\n # print(node_scores)\n else:\n node_scores = new_trig_score[trigger_idx].tolist()\n node_scores_norm = normalize_score(node_scores)\n # node_scores = [(s, i) for i, s in enumerate(node_scores)]\n node_scores = [(s, i, n) for i, (s, n) in enumerate(zip(node_scores,\n node_scores_norm))]\n node_scores.sort(key=lambda x: x[0], reverse=True)\n top_node_scores = node_scores[:self.beta_v]\n\n beam_set_ = []\n for graph in beam_set:\n for score, label, score_norm in top_node_scores:\n graph_ = graph.copy()\n if is_entity_node:\n graph_.add_entity(start, end, label, score, score_norm)\n else:\n graph_.add_trigger(start, end, label, score, score_norm)\n beam_set_.append(graph_)\n beam_set = beam_set_\n\n # 2. 
edge step\n if is_entity_node:\n # add a new entity: new relations, new argument roles\n for i in range(entity_idx):\n # add relation edges\n # edge_scores_1 = relation_type_scores[i * entity_num + entity_idx].tolist()\n # edge_scores_2 = relation_type_scores[entity_idx * entity_num + i].tolist()\n edge_scores_1 = new_rela_score[i * entity_num + entity_idx].tolist()\n edge_scores_2 = new_rela_score[entity_idx * entity_num + i].tolist()\n # print(relation_type_scores[i * entity_num + entity_idx])\n\n # edge_scores_norm_1 = (edge_scores_1 / 10.0).softmax(0).tolist()\n # edge_scores_norm_2 = (edge_scores_2 / 10.0).softmax(0).tolist()\n # edge_scores_1 = edge_scores_1.tolist()\n # edge_scores_2 = edge_scores_2.tolist()\n edge_scores_norm_1 = normalize_score(edge_scores_1)\n edge_scores_norm_2 = normalize_score(edge_scores_2)\n\n if self.relation_directional:\n edge_scores = [(max(s1, s2), n2 if s1 < s2 else n1, i, s1 < s2)\n for i, (s1, s2, n1, n2)\n in enumerate(zip(edge_scores_1, edge_scores_2,\n edge_scores_norm_1,\n edge_scores_norm_2))]\n null_score = edge_scores[0][0]\n edge_scores.sort(key=lambda x: x[0], reverse=True)\n top_edge_scores = edge_scores[:self.beta_e]\n else:\n edge_scores = [(max(s1, s2), n2 if s1 < n2 else n1, i, False)\n for i, (s1, s2, n1, n2)\n in enumerate(zip(edge_scores_1, edge_scores_2,\n edge_scores_norm_1,\n edge_scores_norm_2))]\n null_score = edge_scores[0][0]\n edge_scores.sort(key=lambda x: x[0], reverse=True)\n top_edge_scores = edge_scores[:self.beta_e]\n\n beam_set_ = []\n for graph in beam_set:\n has_valid_edge = False\n for score, score_norm, label, inverse in top_edge_scores:\n rel_cur_ent = label * 100 + graph.entities[-1][-1]\n rel_pre_ent = label * 100 + graph.entities[i][-1]\n if label == 0 or (rel_pre_ent in self.valid_relation_entity and\n rel_cur_ent in self.valid_relation_entity):\n graph_ = graph.copy()\n if self.relation_directional and inverse:\n graph_.add_relation(entity_idx, i, label, score, score_norm)\n else:\n graph_.add_relation(i, entity_idx, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n if not has_valid_edge:\n graph_ = graph.copy()\n graph_.add_relation(i, entity_idx, 0, null_score)\n beam_set_.append(graph_)\n beam_set = beam_set_\n if len(beam_set) > 200:\n beam_set = self.trim_beam_set(beam_set, self.beam_size)\n\n for i in range(trigger_idx):\n # add argument role edges\n edge_scores = new_role_score[i * entity_num + entity_idx].tolist()\n edge_scores_norm = normalize_score(edge_scores)\n edge_scores = [(s, i, n) for i, (s, n) in enumerate(zip(edge_scores, edge_scores_norm))]\n null_score = edge_scores[0][0]\n edge_scores.sort(key=lambda x: x[0], reverse=True)\n top_edge_scores = edge_scores[:self.beta_e]\n\n beam_set_ = []\n for graph in beam_set:\n has_valid_edge = False\n for score, label, score_norm in top_edge_scores:\n role_entity = label * 100 + graph.entities[-1][-1]\n event_role = graph.triggers[i][-1] * 100 + label\n if label == 0 or (event_role in self.valid_event_role and\n role_entity in self.valid_role_entity):\n graph_ = graph.copy()\n graph_.add_role(i, entity_idx, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n if not has_valid_edge:\n graph_ = graph.copy()\n graph_.add_role(i, entity_idx, 0, null_score)\n beam_set_.append(graph_)\n beam_set = beam_set_\n if len(beam_set) > 100:\n beam_set = self.trim_beam_set(beam_set, self.beam_size)\n beam_set = self.trim_beam_set(beam_set_, self.beam_size)\n\n else:\n # add a new trigger: new argument roles\n 
for i in range(entity_idx):\n edge_scores = new_role_score[trigger_idx * entity_num + i].tolist()\n edge_scores_norm = normalize_score(edge_scores)\n edge_scores = [(s, i, n) for i, (s, n) in enumerate(zip(edge_scores,\n edge_scores_norm))]\n null_score = edge_scores[0][0]\n edge_scores.sort(key=lambda x: x[0], reverse=True)\n top_edge_scores = edge_scores[:self.beta_e]\n\n beam_set_ = []\n for graph in beam_set:\n has_valid_edge = False\n for score, label, score_norm in top_edge_scores:\n event_role = graph.triggers[-1][-1] * 100 + label\n role_entity = label * 100 + graph.entities[i][-1]\n if label == 0 or (event_role in self.valid_event_role\n and role_entity in self.valid_role_entity):\n graph_ = graph.copy()\n graph_.add_role(trigger_idx, i, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n if not has_valid_edge:\n graph_ = graph.copy()\n graph_.add_role(trigger_idx, i, 0, null_score)\n beam_set_.append(graph_)\n beam_set = beam_set_\n if len(beam_set) > 100:\n beam_set = self.trim_beam_set(beam_set, self.beam_size)\n\n beam_set = self.trim_beam_set(beam_set_, self.beam_size)\n\n if is_entity_node:\n entity_idx += 1\n else:\n trigger_idx += 1\n beam_set.sort(key=lambda x: self.compute_graph_score(x), reverse=True)\n graph = beam_set[0]\n\n # predict mention types\n _, mention_types = mention_type_scores.max(dim=1)\n mention_types = mention_types[:entity_idx]\n mention_list = [(i, j, l.item()) for (i, j, k), l\n in zip(graph.entities, mention_types)]\n graph.mentions = mention_list\n\n return graph" }, { "identifier": "IEDataset", "path": "TextEE/models/AMRIE/data.py", "snippet": "class IEDataset(Dataset):\n def __init__(self, raw_data, tokenizer, graph_list, align_list, exist_list, max_length=128, gpu=False,\n relation_mask_self=True, relation_directional=False,\n coref=False, symmetric_relations=None, test=False):\n self.raw_data = raw_data\n self.data = []\n self.gpu = gpu\n self.max_length = max_length\n self.relation_mask_self = relation_mask_self\n self.relation_directional = relation_directional\n self.coref = coref\n self.amr_graphs = graph_list\n self.align_list = align_list\n self.exist_list = exist_list\n self.test = test\n if symmetric_relations is None:\n self.symmetric_relations = set()\n else:\n self.symmetric_relations = symmetric_relations\n \n self.tokenizer = tokenizer\n self.load_data()\n # print(\"data\", len(self.data))\n # print(\"type\", type(self.data))\n # print(self.data[0])\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, item):\n return self.data[item]\n\n @property\n def entity_type_set(self):\n type_set = set()\n for inst in self.data:\n for entity in inst['entity_mentions']:\n type_set.add(entity.get('entity_type', \"UNK\"))\n return type_set\n\n @property\n def event_type_set(self):\n type_set = set()\n for inst in self.data:\n for event in inst['event_mentions']:\n type_set.add(event['event_type'])\n return type_set\n\n @property\n def relation_type_set(self):\n type_set = set()\n for inst in self.data:\n for relation in inst.get('relation_mentions', []):\n type_set.add(relation['relation_type'])\n return type_set\n\n @property\n def role_type_set(self):\n type_set = set()\n for inst in self.data:\n for event in inst['event_mentions']:\n for arg in event['arguments']:\n type_set.add(arg['role'])\n return type_set\n\n def load_data(self):\n \"\"\"Load data from file.\"\"\"\n overlength_num = 0\n self.skip_insts = set()\n for i, inst in enumerate(self.raw_data):\n pieces = [self.tokenizer.tokenize(t, 
is_split_into_words=True) for t in inst['tokens']]\n inst_len = len(pieces)\n if self.max_length != -1 and inst_len > self.max_length - 2:\n if self.test:\n # will skip this instance during inference\n self.skip_insts.add((inst[\"doc_id\"], inst[\"wnd_id\"]))\n inst_ = copy.deepcopy(self.data[-1])\n inst_[\"doc_id\"] = inst[\"doc_id\"]\n inst_[\"wnd_id\"] = inst[\"wnd_id\"]\n self.data.append(inst_)\n continue\n else:\n overlength_num += 1\n continue\n \n token_lens = [len(x) for x in pieces]\n if 0 in token_lens:\n raise ValueError\n pieces = [p for ps in pieces for p in ps]\n inst['pieces'] = pieces\n inst['token_lens'] = token_lens\n \n inst['entity_mentions'] = inst['extra_info']['entity_mentions']\n inst['relation_mentions'] = inst['extra_info']['relation_mentions']\n inst['event_mentions'] = inst['extra_info']['event_mentions']\n inst.update({\"amr\": self.amr_graphs[i]})\n inst.update({\"align\": self.align_list[i]})\n inst.update({\"exist\": self.exist_list[i]})\n # print(inst)\n self.data.append(inst)\n\n if overlength_num:\n logger.info('Discarded {} overlength instances'.format(overlength_num))\n logger.info('Loaded {} OneIE instances from {} E2E instances'.format(len(self), len(self.raw_data)))\n\n def numberize(self, tokenizer, vocabs):\n \"\"\"Numberize word pieces, labels, etcs.\n :param tokenizer: Bert tokenizer.\n :param vocabs (dict): a dict of vocabularies.\n \"\"\"\n entity_type_stoi = vocabs['entity_type']\n event_type_stoi = vocabs['event_type']\n relation_type_stoi = vocabs['relation_type']\n role_type_stoi = vocabs['role_type']\n mention_type_stoi = vocabs['mention_type']\n entity_label_stoi = vocabs['entity_label']\n trigger_label_stoi = vocabs['trigger_label']\n\n data = []\n for inst in self.data:\n tokens = inst['tokens']\n pieces = inst['pieces']\n sent_id = inst['wnd_id']\n entities = inst['entity_mentions']\n entities, entity_id_map = remove_overlap_entities(entities)\n entities.sort(key=lambda x: x['start'])\n events = inst['event_mentions']\n events.sort(key=lambda x: x['trigger']['start'])\n relations = inst['relation_mentions']\n token_num = len(tokens)\n token_lens = inst['token_lens']\n amr_graph = inst['amr']\n align_list = inst['align']\n exist_list = inst['exist']\n\n # Pad word pieces with special tokens\n piece_idxs = tokenizer.encode(pieces,\n add_special_tokens=True,\n max_length=self.max_length,\n truncation=True)\n pad_num = self.max_length - len(piece_idxs)\n attn_mask = [1] * len(piece_idxs) + [0] * pad_num\n piece_idxs = piece_idxs + [0] * pad_num\n\n # Entity\n # - entity_labels and entity_label_idxs are used for identification\n # - entity_types and entity_type_idxs are used for classification\n # - entity_list is used for graph representation\n entity_labels = get_entity_labels(entities, token_num)\n entity_label_idxs = [entity_label_stoi[l] for l in entity_labels]\n entity_types = [e.get('entity_type', \"UNK\") for e in entities]\n entity_type_idxs = [entity_type_stoi[l] for l in entity_types]\n entity_list = [(e['start'], e['end'], entity_type_stoi[e.get('entity_type', \"UNK\")])\n for e in entities]\n # entity_num = len(entity_list)\n mention_types = [e.get('mention_type', \"UNK\") for e in entities]\n mention_type_idxs = [mention_type_stoi[l] for l in mention_types]\n mention_list = [(i, j, l) for (i, j, k), l\n in zip(entity_list, mention_type_idxs)]\n\n # Trigger\n # - trigger_labels and trigger_label_idxs are used for identification\n # - event_types and event_type_idxs are used for classification\n # - trigger_list is used for 
graph representation\n trigger_labels = get_trigger_labels(events, token_num)\n trigger_label_idxs = [trigger_label_stoi[l]\n for l in trigger_labels]\n event_types = [e['event_type'] for e in events]\n event_type_idxs = [event_type_stoi[l] for l in event_types]\n trigger_list = [(e['trigger']['start'], e['trigger']['end'],\n event_type_stoi[e['event_type']])\n for e in events]\n\n # Relation\n relation_types = get_relation_types(entities, relations,\n entity_id_map,\n directional=self.relation_directional,\n symmetric=self.symmetric_relations)\n relation_type_idxs = [[relation_type_stoi[l] for l in ls]\n for ls in relation_types]\n if self.relation_mask_self:\n for i in range(len(relation_type_idxs)):\n relation_type_idxs[i][i] = -100\n relation_list = get_relation_list(entities, relations,\n entity_id_map, relation_type_stoi,\n directional=self.relation_directional,\n symmetric=self.symmetric_relations)\n\n # Argument role\n role_types = get_role_types(entities, events, entity_id_map)\n role_type_idxs = [[role_type_stoi[l] for l in ls]\n for ls in role_types]\n role_list = get_role_list(entities, events,\n entity_id_map, role_type_stoi)\n\n # Graph\n graph = Graph(\n entities=entity_list,\n triggers=trigger_list,\n relations=relation_list,\n roles=role_list,\n mentions=mention_list,\n vocabs=vocabs,\n )\n\n instance = Instance(\n sent_id=sent_id,\n tokens=tokens,\n pieces=pieces,\n piece_idxs=piece_idxs,\n token_lens=token_lens,\n attention_mask=attn_mask,\n entity_label_idxs=entity_label_idxs,\n trigger_label_idxs=trigger_label_idxs,\n entity_type_idxs=entity_type_idxs,\n event_type_idxs=event_type_idxs,\n relation_type_idxs=relation_type_idxs,\n mention_type_idxs=mention_type_idxs,\n role_type_idxs=role_type_idxs,\n graph=graph,\n entity_num=len(entities),\n trigger_num=len(events),\n amr=amr_graph,\n align=align_list,\n exist=exist_list\n )\n data.append(instance)\n self.data = data\n\n def collate_fn(self, batch):\n # print(batch)\n batch_piece_idxs = []\n batch_tokens = []\n batch_entity_labels, batch_trigger_labels = [], []\n batch_entity_types, batch_event_types = [], []\n batch_relation_types, batch_role_types = [], []\n batch_mention_types = []\n batch_graphs = []\n batch_token_lens = []\n batch_attention_masks = []\n\n sent_ids = [inst.sent_id for inst in batch]\n token_nums = [len(inst.tokens) for inst in batch]\n max_token_num = max(token_nums)\n\n max_entity_num = max([inst.entity_num for inst in batch] + [1])\n max_trigger_num = max([inst.trigger_num for inst in batch] + [1])\n\n amrs = []\n aligns = []\n exists = []\n\n for inst in batch:\n token_num = len(inst.tokens)\n batch_piece_idxs.append(inst.piece_idxs)\n batch_attention_masks.append(inst.attention_mask)\n batch_token_lens.append(inst.token_lens)\n batch_graphs.append(inst.graph)\n batch_tokens.append(inst.tokens)\n # for identification\n batch_entity_labels.append(inst.entity_label_idxs +\n [0] * (max_token_num - token_num))\n batch_trigger_labels.append(inst.trigger_label_idxs +\n [0] * (max_token_num - token_num))\n # for classification\n batch_entity_types.extend(inst.entity_type_idxs +\n [-100] * (max_entity_num - inst.entity_num))\n batch_event_types.extend(inst.event_type_idxs +\n [-100] * (max_trigger_num - inst.trigger_num))\n batch_mention_types.extend(inst.mention_type_idxs +\n [-100] * (max_entity_num - inst.entity_num))\n for l in inst.relation_type_idxs:\n batch_relation_types.extend(\n l + [-100] * (max_entity_num - inst.entity_num))\n batch_relation_types.extend(\n [-100] * max_entity_num * 
(max_entity_num - inst.entity_num))\n for l in inst.role_type_idxs:\n batch_role_types.extend(\n l + [-100] * (max_entity_num - inst.entity_num))\n batch_role_types.extend(\n [-100] * max_entity_num * (max_trigger_num - inst.trigger_num))\n amrs.append(inst.amr)\n aligns.append(inst.align)\n exists.append(inst.exist)\n \n\n if self.gpu:\n batch_piece_idxs = torch.cuda.LongTensor(batch_piece_idxs)\n batch_attention_masks = torch.cuda.FloatTensor(\n batch_attention_masks)\n\n batch_entity_labels = torch.cuda.LongTensor(batch_entity_labels)\n batch_trigger_labels = torch.cuda.LongTensor(batch_trigger_labels)\n batch_entity_types = torch.cuda.LongTensor(batch_entity_types)\n batch_mention_types = torch.cuda.LongTensor(batch_mention_types)\n batch_event_types = torch.cuda.LongTensor(batch_event_types)\n batch_relation_types = torch.cuda.LongTensor(batch_relation_types)\n batch_role_types = torch.cuda.LongTensor(batch_role_types)\n\n token_nums = torch.cuda.LongTensor(token_nums)\n else:\n batch_piece_idxs = torch.LongTensor(batch_piece_idxs)\n batch_attention_masks = torch.FloatTensor(batch_attention_masks)\n\n batch_entity_labels = torch.LongTensor(batch_entity_labels)\n batch_trigger_labels = torch.LongTensor(batch_trigger_labels)\n batch_entity_types = torch.LongTensor(batch_entity_types)\n batch_mention_types = torch.LongTensor(batch_mention_types)\n batch_event_types = torch.LongTensor(batch_event_types)\n batch_relation_types = torch.LongTensor(batch_relation_types)\n batch_role_types = torch.LongTensor(batch_role_types)\n\n token_nums = torch.LongTensor(token_nums)\n\n return Batch(\n sent_ids=sent_ids,\n tokens=[inst.tokens for inst in batch],\n piece_idxs=batch_piece_idxs,\n token_lens=batch_token_lens,\n attention_masks=batch_attention_masks,\n entity_label_idxs=batch_entity_labels,\n trigger_label_idxs=batch_trigger_labels,\n entity_type_idxs=batch_entity_types,\n mention_type_idxs=batch_mention_types,\n event_type_idxs=batch_event_types,\n relation_type_idxs=batch_relation_types,\n role_type_idxs=batch_role_types,\n graphs=batch_graphs,\n token_nums=token_nums,\n amr=amrs,\n align=aligns,\n exist=exists\n )" }, { "identifier": "generate_vocabs", "path": "TextEE/models/AMRIE/util.py", "snippet": "def generate_vocabs(datasets, coref=False,\n relation_directional=False,\n symmetric_relations=None):\n \"\"\"Generate vocabularies from a list of data sets\n :param datasets (list): A list of data sets\n :return (dict): A dictionary of vocabs\n \"\"\"\n entity_type_set = set()\n event_type_set = set()\n relation_type_set = set()\n role_type_set = set()\n for dataset in datasets:\n entity_type_set.update(dataset.entity_type_set)\n event_type_set.update(dataset.event_type_set)\n relation_type_set.update(dataset.relation_type_set)\n role_type_set.update(dataset.role_type_set)\n\n # add inverse relation types for non-symmetric relations\n if relation_directional:\n if symmetric_relations is None:\n symmetric_relations = []\n relation_type_set_ = set()\n for relation_type in relation_type_set:\n relation_type_set_.add(relation_type)\n if relation_directional and relation_type not in symmetric_relations:\n relation_type_set_.add(relation_type + '_inv')\n\n # entity and trigger labels\n prefix = ['B', 'I']\n entity_label_stoi = {'O': 0}\n trigger_label_stoi = {'O': 0}\n for t in entity_type_set:\n for p in prefix:\n entity_label_stoi['{}-{}'.format(p, t)] = len(entity_label_stoi)\n for t in event_type_set:\n for p in prefix:\n trigger_label_stoi['{}-{}'.format(p, t)] = len(trigger_label_stoi)\n\n 
entity_type_stoi = {k: i for i, k in enumerate(entity_type_set, 1)}\n entity_type_stoi['O'] = 0\n\n event_type_stoi = {k: i for i, k in enumerate(event_type_set, 1)}\n event_type_stoi['O'] = 0\n\n relation_type_stoi = {k: i for i, k in enumerate(relation_type_set, 1)}\n relation_type_stoi['O'] = 0\n if coref:\n relation_type_stoi['COREF'] = len(relation_type_stoi)\n\n role_type_stoi = {k: i for i, k in enumerate(role_type_set, 1)}\n role_type_stoi['O'] = 0\n\n mention_type_stoi = {'NAM': 0, 'NOM': 1, 'PRO': 2, 'UNK': 3, 'NEU': 4}\n\n return {\n 'entity_type': entity_type_stoi,\n 'event_type': event_type_stoi,\n 'relation_type': relation_type_stoi,\n 'role_type': role_type_stoi,\n 'mention_type': mention_type_stoi,\n 'entity_label': entity_label_stoi,\n 'trigger_label': trigger_label_stoi,\n }" }, { "identifier": "load_valid_patterns", "path": "TextEE/models/AMRIE/util.py", "snippet": "def load_valid_patterns(path, vocabs):\n event_type_vocab = vocabs['event_type']\n entity_type_vocab = vocabs['entity_type']\n relation_type_vocab = vocabs['relation_type']\n role_type_vocab = vocabs['role_type']\n\n # valid event-role\n valid_event_role = set()\n event_role = json.load(\n open(os.path.join(path, 'event_role.json'), 'r', encoding='utf-8'))\n for event, roles in event_role.items():\n if event not in event_type_vocab:\n continue\n event_type_idx = event_type_vocab[event]\n for role in roles:\n if role not in role_type_vocab:\n continue\n role_type_idx = role_type_vocab[role]\n valid_event_role.add(event_type_idx * 100 + role_type_idx)\n\n # valid relation-entity\n valid_relation_entity = set()\n relation_entity = json.load(\n open(os.path.join(path, 'relation_entity.json'), 'r', encoding='utf-8'))\n for relation, entities in relation_entity.items():\n relation_type_idx = relation_type_vocab[relation]\n for entity in entities:\n entity_type_idx = entity_type_vocab[entity]\n valid_relation_entity.add(\n relation_type_idx * 100 + entity_type_idx)\n\n # valid role-entity\n valid_role_entity = set()\n role_entity = json.load(\n open(os.path.join(path, 'role_entity.json'), 'r', encoding='utf-8'))\n for role, entities in role_entity.items():\n if role not in role_type_vocab:\n continue\n role_type_idx = role_type_vocab[role]\n for entity in entities:\n entity_type_idx = entity_type_vocab[entity]\n valid_role_entity.add(role_type_idx * 100 + entity_type_idx)\n\n return {\n 'event_role': valid_event_role,\n 'relation_entity': valid_relation_entity,\n 'role_entity': valid_role_entity\n }" }, { "identifier": "save_result", "path": "TextEE/models/AMRIE/util.py", "snippet": "def save_result(output_file, gold_graphs, pred_graphs, sent_ids, tokens=None):\n with open(output_file, 'w', encoding='utf-8') as w:\n for i, (gold_graph, pred_graph, sent_id) in enumerate(\n zip(gold_graphs, pred_graphs, sent_ids)):\n output = {'sent_id': sent_id,\n 'gold': gold_graph.to_dict(),\n 'pred': pred_graph.to_dict()}\n if tokens:\n output['tokens'] = tokens[i]\n w.write(json.dumps(output) + '\\n')" }, { "identifier": "best_score_by_task", "path": "TextEE/models/AMRIE/util.py", "snippet": "def best_score_by_task(log_file, task, max_epoch=1000):\n with open(log_file, 'r', encoding='utf-8') as r:\n config = r.readline()\n\n best_scores = []\n best_dev_score = 0\n for line in r:\n # print(line)\n record = json.loads(line)\n dev = record['dev']\n dev_goldTri = record['dev_goldTri']\n test = record['test']\n epoch = record['epoch']\n if epoch > max_epoch:\n break\n if dev[task]['f'] > best_dev_score:\n best_dev_score = 
dev[task]['f']\n best_scores = [dev_goldTri, test, epoch]\n print(best_scores)\n print('Epoch: {}'.format(best_scores[-1]))\n tasks = ['entity', 'mention', 'relation', 'trigger_id', 'trigger',\n 'role_id', 'role']\n for t in tasks:\n print('{}: dev: {:.2f}, test: {:.2f}'.format(t,\n best_scores[0][t][\n 'f'] * 100.0,\n best_scores[1][t][\n 'f'] * 100.0))" }, { "identifier": "score_graphs", "path": "TextEE/models/AMRIE/scorer.py", "snippet": "def score_graphs(gold_graphs, pred_graphs,\n relation_directional=False):\n gold_arg_num = pred_arg_num = arg_idn_num = arg_class_num = 0\n gold_trigger_num = pred_trigger_num = trigger_idn_num = trigger_class_num = 0\n gold_ent_num = pred_ent_num = ent_match_num = 0\n gold_rel_num = pred_rel_num = rel_match_num = 0\n gold_men_num = pred_men_num = men_match_num = 0\n\n for gold_graph, pred_graph in zip(gold_graphs, pred_graphs):\n # Entity\n gold_entities = gold_graph.entities\n pred_entities = pred_graph.entities\n gold_ent_num += len(gold_entities)\n pred_ent_num += len(pred_entities)\n ent_match_num += len([entity for entity in pred_entities\n if entity in gold_entities])\n\n # Mention\n gold_mentions = gold_graph.mentions\n pred_mentions = pred_graph.mentions\n gold_men_num += len(gold_mentions)\n pred_men_num += len(pred_mentions)\n men_match_num += len([mention for mention in pred_mentions\n if mention in gold_mentions])\n\n # Relation\n gold_relations = gold_graph.relations\n pred_relations = pred_graph.relations\n gold_rel_num += len(gold_relations)\n pred_rel_num += len(pred_relations)\n for arg1, arg2, rel_type in pred_relations:\n arg1_start, arg1_end, _ = pred_entities[arg1]\n arg2_start, arg2_end, _ = pred_entities[arg2]\n for arg1_gold, arg2_gold, rel_type_gold in gold_relations:\n arg1_start_gold, arg1_end_gold, _ = gold_entities[arg1_gold]\n arg2_start_gold, arg2_end_gold, _ = gold_entities[arg2_gold]\n if relation_directional:\n if (arg1_start == arg1_start_gold and\n arg1_end == arg1_end_gold and\n arg2_start == arg2_start_gold and\n arg2_end == arg2_end_gold\n ) and rel_type == rel_type_gold:\n rel_match_num += 1\n break\n else:\n if ((arg1_start == arg1_start_gold and\n arg1_end == arg1_end_gold and\n arg2_start == arg2_start_gold and\n arg2_end == arg2_end_gold) or (\n arg1_start == arg2_start_gold and\n arg1_end == arg2_end_gold and\n arg2_start == arg1_start_gold and\n arg2_end == arg1_end_gold\n )) and rel_type == rel_type_gold:\n rel_match_num += 1\n break\n\n # Trigger\n gold_triggers = gold_graph.triggers\n pred_triggers = pred_graph.triggers\n gold_trigger_num += len(gold_triggers)\n pred_trigger_num += len(pred_triggers)\n for trg_start, trg_end, event_type in pred_triggers:\n matched = [item for item in gold_triggers\n if item[0] == trg_start and item[1] == trg_end]\n if matched:\n trigger_idn_num += 1\n if matched[0][-1] == event_type:\n trigger_class_num += 1\n\n # Argument\n gold_args = convert_arguments(gold_triggers, gold_entities,\n gold_graph.roles)\n pred_args = convert_arguments(pred_triggers, pred_entities,\n pred_graph.roles)\n gold_arg_num += len(gold_args)\n pred_arg_num += len(pred_args)\n for pred_arg in pred_args:\n arg_start, arg_end, event_type, role = pred_arg\n gold_idn = {item for item in gold_args\n if item[0] == arg_start and item[1] == arg_end\n and item[2] == event_type}\n if gold_idn:\n arg_idn_num += 1\n gold_class = {item for item in gold_idn if item[-1] == role}\n if gold_class:\n arg_class_num += 1\n\n entity_prec, entity_rec, entity_f = compute_f1(\n pred_ent_num, gold_ent_num, 
ent_match_num)\n mention_prec, mention_rec, mention_f = compute_f1(\n pred_men_num, gold_men_num, men_match_num)\n trigger_id_prec, trigger_id_rec, trigger_id_f = compute_f1(\n pred_trigger_num, gold_trigger_num, trigger_idn_num)\n trigger_prec, trigger_rec, trigger_f = compute_f1(\n pred_trigger_num, gold_trigger_num, trigger_class_num)\n relation_prec, relation_rec, relation_f = compute_f1(\n pred_rel_num, gold_rel_num, rel_match_num)\n role_id_prec, role_id_rec, role_id_f = compute_f1(\n pred_arg_num, gold_arg_num, arg_idn_num)\n role_prec, role_rec, role_f = compute_f1(\n pred_arg_num, gold_arg_num, arg_class_num)\n\n print('Entity: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n entity_prec * 100.0, entity_rec * 100.0, entity_f * 100.0))\n print('Mention: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n mention_prec * 100.0, mention_rec * 100.0, mention_f * 100.0))\n print('Trigger identification: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n trigger_id_prec * 100.0, trigger_id_rec * 100.0, trigger_id_f * 100.0))\n print('Trigger: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n trigger_prec * 100.0, trigger_rec * 100.0, trigger_f * 100.0))\n print('Relation: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n relation_prec * 100.0, relation_rec * 100.0, relation_f * 100.0))\n print('Role identification: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n role_id_prec * 100.0, role_id_rec * 100.0, role_id_f * 100.0))\n print('Role: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n role_prec * 100.0, role_rec * 100.0, role_f * 100.0))\n\n scores = {\n 'entity': {'prec': entity_prec, 'rec': entity_rec, 'f': entity_f},\n 'mention': {'prec': mention_prec, 'rec': mention_rec, 'f': mention_f},\n 'trigger': {'prec': trigger_prec, 'rec': trigger_rec, 'f': trigger_f},\n 'trigger_id': {'prec': trigger_id_prec, 'rec': trigger_id_rec,\n 'f': trigger_id_f},\n 'role': {'prec': role_prec, 'rec': role_rec, 'f': role_f},\n 'role_id': {'prec': role_id_prec, 'rec': role_id_rec, 'f': role_id_f},\n 'relation': {'prec': relation_prec, 'rec': relation_rec,\n 'f': relation_f}\n }\n return scores" } ]
import os, sys, logging, tqdm, pprint, copy
import torch
import numpy as np
import ipdb
from transformers import (BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer, AutoTokenizer, AdamW, get_linear_schedule_with_warmup)
from torch.utils.data import DataLoader
from torch.optim import AdamW
from ..trainer import BasicTrainer
from .E2Emodel import AMRIEE2EModel
from .data import IEDataset
from .util import generate_vocabs, load_valid_patterns, save_result, best_score_by_task
from .scorer import score_graphs
from scorer import compute_f1, print_scores
17,677
logger = logging.getLogger(__name__)

class AMRIEE2ETrainer(BasicTrainer):
    def __init__(self, config, type_set=None):
        super().__init__(config, type_set)
        self.tokenizer = None
        self.model = None
        self.valid_patterns = None

    @classmethod
    def add_extra_info_fn(cls, instances, raw_data, config):
        extra_info_map = {}
        for dt in raw_data:
            extra_info = {
                "entity_mentions": dt["entity_mentions"] if "entity_mentions" in dt else [],
                "relation_mentions": dt["relation_mentions"] if "relation_mentions" in dt else [],
                "event_mentions": dt["event_mentions"] if "event_mentions" in dt else [],
            }
            extra_info_map[(dt["doc_id"], dt["wnd_id"])] = extra_info
        for instance in instances:
            instance["extra_info"] = extra_info_map[(instance["doc_id"], instance["wnd_id"])]
        return instances

    def get_idx_map(self, tokens1, tokens2):
        len1, len2 = len(tokens1), len(tokens2)
        idx1_s, idx1_e, idx2_s, idx2_e, = 0, 0, 0, 0
        idx_map = np.zeros((len2+1, ), dtype=np.int32)
        idx_map[-1] = len1
        while idx1_e <= len1 and idx2_e <= len2:
            if "".join(tokens1[idx1_s:idx1_e+1]) == "".join(tokens2[idx2_s:idx2_e+1]):
                idx_map[idx2_s:idx2_e+1] = idx1_s
                idx1_s = idx1_e+1
                idx1_e = idx1_e+1
                idx2_s = idx2_e+1
                idx2_e = idx2_e+1
            elif len("".join(tokens1[idx1_s:idx1_e+1])) <= len("".join(tokens2[idx2_s:idx2_e+1])):
                idx1_e += 1
            else:
                idx2_e += 1
        return idx_map

    def load_tokenizer_(self, checkpoint=None):
        if checkpoint:
            logger.info(f"Loading tokenizer from {checkpoint}")
            state = torch.load(os.path.join(checkpoint, "best_model.tokenizer"))
            self.tokenizer = state["tokenizer"]
        else:
            logger.info(f"Loading tokenizer from {self.config.pretrained_model_name}")
            if self.config.pretrained_model_name.startswith('bert-'):
                self.tokenizer = BertTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir)
            elif self.config.pretrained_model_name.startswith('roberta-'):
                self.tokenizer = RobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir)
            elif self.config.pretrained_model_name.startswith('xlm-roberta-'):
                self.tokenizer = XLMRobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir)
            else:
                self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, do_lower_case=False)

    def load_model_(self, checkpoint=None):
        assert self.tokenizer
        if checkpoint:
            logger.info(f"Loading model from {checkpoint}")
            state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}')
            self.vocabs = state["vocabs"]
            self.type_set = state["type_set"]
            self.valid_patterns = state["valid_patterns"]
            self.model = AMRIEE2EModel(self.config, self.vocabs, self.valid_patterns)
            self.model.load_state_dict(state['model'])
            self.model.cuda(device=self.config.gpu_device)
        else:
            self.valid_patterns = load_valid_patterns(self.config.valid_pattern_path, self.vocabs)
            self.model = AMRIEE2EModel(self.config, self.vocabs, self.valid_patterns)
            self.model.cuda(device=self.config.gpu_device)

    def load_model(self, checkpoint=None):
        self.load_tokenizer_(checkpoint=checkpoint)
        self.load_model_(checkpoint=checkpoint)

    def train(self, train_data, dev_data, **kwargs):
        logger.info("Loading graphs")
        org_train_graphs, train_align, train_exist = torch.load(self.config.processed_train_amr)
        org_dev_graphs, dev_align, dev_exist = torch.load(self.config.processed_dev_amr)
        train_graphs, dev_graphs = [], []
        for g in org_train_graphs:
            g_device = g.to(self.config.gpu_device)
            train_graphs.append(g_device)
        for g in org_dev_graphs:
            g_device = g.to(self.config.gpu_device)
            dev_graphs.append(g_device)
        self.load_tokenizer_()
logger = logging.getLogger(__name__)

class AMRIEE2ETrainer(BasicTrainer):
    def __init__(self, config, type_set=None):
        super().__init__(config, type_set)
        self.tokenizer = None
        self.model = None
        self.valid_patterns = None

    @classmethod
    def add_extra_info_fn(cls, instances, raw_data, config):
        extra_info_map = {}
        for dt in raw_data:
            extra_info = {
                "entity_mentions": dt["entity_mentions"] if "entity_mentions" in dt else [],
                "relation_mentions": dt["relation_mentions"] if "relation_mentions" in dt else [],
                "event_mentions": dt["event_mentions"] if "event_mentions" in dt else [],
            }
            extra_info_map[(dt["doc_id"], dt["wnd_id"])] = extra_info
        for instance in instances:
            instance["extra_info"] = extra_info_map[(instance["doc_id"], instance["wnd_id"])]
        return instances

    def get_idx_map(self, tokens1, tokens2):
        len1, len2 = len(tokens1), len(tokens2)
        idx1_s, idx1_e, idx2_s, idx2_e, = 0, 0, 0, 0
        idx_map = np.zeros((len2+1, ), dtype=np.int32)
        idx_map[-1] = len1
        while idx1_e <= len1 and idx2_e <= len2:
            if "".join(tokens1[idx1_s:idx1_e+1]) == "".join(tokens2[idx2_s:idx2_e+1]):
                idx_map[idx2_s:idx2_e+1] = idx1_s
                idx1_s = idx1_e+1
                idx1_e = idx1_e+1
                idx2_s = idx2_e+1
                idx2_e = idx2_e+1
            elif len("".join(tokens1[idx1_s:idx1_e+1])) <= len("".join(tokens2[idx2_s:idx2_e+1])):
                idx1_e += 1
            else:
                idx2_e += 1
        return idx_map

    def load_tokenizer_(self, checkpoint=None):
        if checkpoint:
            logger.info(f"Loading tokenizer from {checkpoint}")
            state = torch.load(os.path.join(checkpoint, "best_model.tokenizer"))
            self.tokenizer = state["tokenizer"]
        else:
            logger.info(f"Loading tokenizer from {self.config.pretrained_model_name}")
            if self.config.pretrained_model_name.startswith('bert-'):
                self.tokenizer = BertTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir)
            elif self.config.pretrained_model_name.startswith('roberta-'):
                self.tokenizer = RobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir)
            elif self.config.pretrained_model_name.startswith('xlm-roberta-'):
                self.tokenizer = XLMRobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir)
            else:
                self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, do_lower_case=False)

    def load_model_(self, checkpoint=None):
        assert self.tokenizer
        if checkpoint:
            logger.info(f"Loading model from {checkpoint}")
            state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}')
            self.vocabs = state["vocabs"]
            self.type_set = state["type_set"]
            self.valid_patterns = state["valid_patterns"]
            self.model = AMRIEE2EModel(self.config, self.vocabs, self.valid_patterns)
            self.model.load_state_dict(state['model'])
            self.model.cuda(device=self.config.gpu_device)
        else:
            self.valid_patterns = load_valid_patterns(self.config.valid_pattern_path, self.vocabs)
            self.model = AMRIEE2EModel(self.config, self.vocabs, self.valid_patterns)
            self.model.cuda(device=self.config.gpu_device)

    def load_model(self, checkpoint=None):
        self.load_tokenizer_(checkpoint=checkpoint)
        self.load_model_(checkpoint=checkpoint)

    def train(self, train_data, dev_data, **kwargs):
        logger.info("Loading graphs")
        org_train_graphs, train_align, train_exist = torch.load(self.config.processed_train_amr)
        org_dev_graphs, dev_align, dev_exist = torch.load(self.config.processed_dev_amr)
        train_graphs, dev_graphs = [], []
        for g in org_train_graphs:
            g_device = g.to(self.config.gpu_device)
            train_graphs.append(g_device)
        for g in org_dev_graphs:
            g_device = g.to(self.config.gpu_device)
            dev_graphs.append(g_device)
        self.load_tokenizer_()
train_set = IEDataset(train_data, self.tokenizer, train_graphs, train_align, train_exist, gpu=True,
2
2023-11-15 21:32:56+00:00
24k
ahayler/s4c
scripts/benchmarks/sscbench/evaluate_model_sscbench.py
[ { "identifier": "get_cam_k", "path": "scripts/benchmarks/sscbench/generate_ply_sequence.py", "snippet": "def get_cam_k():\n cam_k = np.array(\n [\n 552.554261,\n 0.000000,\n 682.049453,\n 0.000000,\n 0.000000,\n 552.554261,\n 238.769549,\n 0.000000,\n 0.000000,\n 0.000000,\n 1.000000,\n 0.000000,\n ]\n ).reshape(3, 4)\n return cam_k[:3, :3]" }, { "identifier": "read_calib", "path": "scripts/benchmarks/sscbench/point_utils.py", "snippet": "def read_calib():\n \"\"\"\n :param calib_path: Path to a calibration text file.\n :return: dict with calibration matrices.\n \"\"\"\n P = np.array(\n [\n 552.554261,\n 0.000000,\n 682.049453,\n 0.000000,\n 0.000000,\n 552.554261,\n 238.769549,\n 0.000000,\n 0.000000,\n 0.000000,\n 1.000000,\n 0.000000,\n ]\n ).reshape(3, 4)\n\n cam2velo = np.array(\n [\n 0.04307104361,\n -0.08829286498,\n 0.995162929,\n 0.8043914418,\n -0.999004371,\n 0.007784614041,\n 0.04392796942,\n 0.2993489574,\n -0.01162548558,\n -0.9960641394,\n -0.08786966659,\n -0.1770225824,\n ]\n ).reshape(3, 4)\n C2V = np.concatenate(\n [cam2velo, np.array([0, 0, 0, 1]).reshape(1, 4)], axis=0\n )\n # print(\"C2V: \", C2V)\n V2C = np.linalg.inv(C2V)\n # print(\"V2C: \", V2C)\n V2C = V2C[:3, :]\n # print(\"V2C: \", V2C)\n\n # reshape matrices\n calib_out = {}\n # 3x4 projection matrix for left camera\n calib_out[\"P2\"] = P\n calib_out[\"Tr\"] = np.identity(4) # 4x4 matrix\n calib_out[\"Tr\"][:3, :4] = V2C\n return calib_out" }, { "identifier": "generate_point_grid", "path": "scripts/benchmarks/sscbench/point_utils.py", "snippet": "def generate_point_grid(cam_E, vox_origin, voxel_size, scene_size, cam_k, img_W=1408, img_H=376):\n \"\"\"\n compute the 2D projection of voxels centroids\n\n Parameters:\n ----------\n cam_E: 4x4\n =camera pose in case of NYUv2 dataset\n =Transformation from camera to lidar coordinate in case of SemKITTI\n cam_k: 3x3\n camera intrinsics\n vox_origin: (3,)\n world(NYU)/lidar(SemKITTI) cooridnates of the voxel at index (0, 0, 0)\n img_W: int\n image width\n img_H: int\n image height\n scene_size: (3,)\n scene size in meter: (51.2, 51.2, 6.4) for SemKITTI and (4.8, 4.8, 2.88) for NYUv2\n\n Returns\n -------\n projected_pix: (N, 2)\n Projected 2D positions of voxels\n fov_mask: (N,)\n Voxels mask indice voxels inside image's FOV\n pix_z: (N,)\n Voxels'distance to the sensor in meter\n \"\"\"\n # Compute the x, y, z bounding of the scene in meter\n vol_bnds = np.zeros((3, 2))\n vol_bnds[:, 0] = vox_origin\n vol_bnds[:, 1] = vox_origin + np.array(scene_size)\n\n # Compute the voxels centroids in lidar cooridnates\n vol_dim = np.ceil((vol_bnds[:, 1] - vol_bnds[:, 0]) / voxel_size).copy(order='C').astype(int)\n xv, yv, zv = np.meshgrid(\n range(vol_dim[0]),\n range(vol_dim[1]),\n range(vol_dim[2]),\n indexing='ij'\n )\n vox_coords = np.concatenate([\n xv.reshape(1, -1),\n yv.reshape(1, -1),\n zv.reshape(1, -1)\n ], axis=0).astype(int).T\n\n # Project voxels'centroid from lidar coordinates to camera coordinates\n cam_pts = TSDFVolume.vox2world(vox_origin, vox_coords, voxel_size)\n cam_pts = rigid_transform(cam_pts, cam_E)\n\n # Project camera coordinates to pixel positions\n projected_pix = TSDFVolume.cam2pix(cam_pts, cam_k)\n pix_x, pix_y = projected_pix[:, 0], projected_pix[:, 1]\n\n # Eliminate pixels outside view frustum\n pix_z = cam_pts[:, 2]\n fov_mask = np.logical_and(pix_x >= 0,\n np.logical_and(pix_x < img_W,\n np.logical_and(pix_y >= 0,\n np.logical_and(pix_y < img_H,\n pix_z > 0))))\n\n return cam_pts, fov_mask" }, { "identifier": "get_fov_mask", "path": 
"scripts/benchmarks/sscbench/point_utils.py", "snippet": "def get_fov_mask():\n calib = read_calib()\n T_velo_2_cam = calib[\"Tr\"]\n _, fov_mask = generate_point_grid(vox_origin=np.array([0, -25.6, -2]),\n scene_size=(51.2, 51.2, 6.4),\n voxel_size=0.2,\n cam_E=T_velo_2_cam,\n cam_k=get_cam_k())\n\n return fov_mask.reshape(256, 256, 32)" }, { "identifier": "save_as_voxel_ply", "path": "scripts/voxel/gen_voxelgrid_npy.py", "snippet": "def save_as_voxel_ply(path, is_occupied, size=(256, 256, 32), classes=None):\n is_occupied = remove_invisible(is_occupied)\n\n res = (size[0] + 1, size[1] + 1, size[2] + 1)\n x_range = (size[0] * .2 * .5, -size[0] * .2 * .5)\n y_range = (size[1] * .2, 0)\n z_range = (0, size[2] * .2)\n\n neighbors = check_neighbors(is_occupied)\n neighbors = neighbors.view(6, -1)[:, is_occupied.reshape(-1)].T\n\n q_pts = get_pts(x_range, y_range, z_range, *res)\n q_pts = q_pts.to(device).reshape(1, -1, 3)\n verts, faces, colors = build_voxels(is_occupied.nonzero(), *res, q_pts.squeeze(0).T, neighbors, classes=classes)\n\n verts = list(map(tuple, verts))\n colors = list(map(tuple, colors))\n verts_colors = [v + c for v, c in zip(verts, colors)]\n verts_data = np.array(verts_colors, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])\n\n face_data = np.array(faces, dtype='i4')\n ply_faces = np.empty(len(faces), dtype=[('vertex_indices', 'i4', (4,))])\n ply_faces['vertex_indices'] = face_data\n\n verts_el = PlyElement.describe(verts_data, \"vertex\")\n faces_el = PlyElement.describe(ply_faces, \"face\")\n PlyData([verts_el, faces_el]).write(str(path))" }, { "identifier": "BTSNet", "path": "models/bts/model/models_bts.py", "snippet": "class BTSNet(torch.nn.Module):\n def __init__(self, conf):\n super().__init__()\n\n self.d_min = conf.get(\"z_near\")\n self.d_max = conf.get(\"z_far\")\n\n self.learn_empty = conf.get(\"learn_empty\", True)\n self.empty_empty = conf.get(\"empty_empty\", False)\n self.inv_z = conf.get(\"inv_z\", True)\n\n self.color_interpolation = conf.get(\"color_interpolation\", \"bilinear\")\n self.code_mode = conf.get(\"code_mode\", \"z\")\n if self.code_mode not in [\"z\", \"distance\"]:\n raise NotImplementedError(f\"Unknown mode for positional encoding: {self.code_mode}\")\n\n self.encoder = make_backbone(conf[\"encoder\"])\n self.code_xyz = PositionalEncoding.from_conf(conf[\"code\"], d_in=3)\n\n self.flip_augmentation = conf.get(\"flip_augmentation\", False)\n\n self.return_sample_depth = conf.get(\"return_sample_depth\", False)\n\n self.sample_color = conf.get(\"sample_color\", True)\n\n d_in = self.encoder.latent_size + self.code_xyz.d_out\n d_out = 1 if self.sample_color else 4\n\n self._d_in = d_in\n self._d_out = d_out\n\n self.mlp_coarse = make_mlp(conf[\"mlp_coarse\"], d_in, d_out=d_out)\n self.mlp_fine = make_mlp(conf[\"mlp_fine\"], d_in, d_out=d_out, allow_empty=True)\n\n # MLP for segmentation classes\n # TODO: Find the output dimensions automatically\n self.segmentation_mode = conf.get('segmentation_mode', None)\n if self.segmentation_mode == 'KITTI-360':\n self.mlp_segmentation = make_mlp(conf[\"mlp_coarse\"], d_in, d_out=21)\n # self.mlp_segmentation = make_segnet(d_in=d_in, d_out=21, d_hidden_list=[64])\n elif self.segmentation_mode == 'panoptic_deeplab':\n # self.mlp_segmentation = make_mlp(conf[\"mlp_coarse\"], d_in, d_out=19)\n self.mlp_segmentation = make_segnet(d_in=d_in, d_out=19, d_hidden_list=[64])\n # self.mlp_segmentation = make_intercept_model(d_in, d_out=21)\n\n if self.learn_empty:\n 
self.empty_feature = nn.Parameter(torch.randn((self.encoder.latent_size,), requires_grad=True))\n\n self._scale = 0\n\n def set_scale(self, scale):\n self._scale = scale\n\n def get_scale(self):\n return self._scale\n\n def compute_grid_transforms(self, *args, **kwargs):\n pass\n\n def encode(self, images, Ks, poses_c2w, ids_encoder=None, ids_render=None, images_alt=None, combine_ids=None):\n poses_w2c = torch.inverse(poses_c2w)\n\n if ids_encoder is None:\n images_encoder = images\n Ks_encoder = Ks\n poses_w2c_encoder = poses_w2c\n ids_encoder = list(range(len(images)))\n else:\n images_encoder = images[:, ids_encoder]\n Ks_encoder = Ks[:, ids_encoder]\n poses_w2c_encoder = poses_w2c[:, ids_encoder]\n\n if images_alt is not None:\n images = images_alt\n else:\n images = images * .5 + .5\n\n if ids_render is None:\n images_render = images\n Ks_render = Ks\n poses_w2c_render = poses_w2c\n ids_render = list(range(len(images)))\n else:\n images_render = images[:, ids_render]\n Ks_render = Ks[:, ids_render]\n poses_w2c_render = poses_w2c[:, ids_render]\n\n if combine_ids is not None:\n combine_ids = list(list(group) for group in combine_ids)\n get_combined = set(sum(combine_ids, []))\n for i in range(images.shape[1]):\n if i not in get_combined:\n combine_ids.append((i,))\n remap_encoder = {v: i for i, v in enumerate(ids_encoder)}\n remap_render = {v: i for i, v in enumerate(ids_render)}\n comb_encoder = [[remap_encoder[i] for i in group if i in ids_encoder] for group in combine_ids]\n comb_render = [[remap_render[i] for i in group if i in ids_render] for group in combine_ids]\n comb_encoder = [group for group in comb_encoder if len(group) > 0]\n comb_render = [group for group in comb_render if len(group) > 0]\n else:\n comb_encoder = None\n comb_render = None\n\n n, nv, c, h, w = images_encoder.shape\n c_l = self.encoder.latent_size\n\n if self.flip_augmentation and self.training:\n do_flip = (torch.rand(1) > .5).item()\n else:\n do_flip = False\n\n if do_flip:\n images_encoder = torch.flip(images_encoder, dims=(-1, ))\n\n image_latents_ms = self.encoder(images_encoder.view(n * nv, c, h, w))\n\n if do_flip:\n image_latents_ms = [torch.flip(il, dims=(-1, )) for il in image_latents_ms]\n\n _, _, h_, w_ = image_latents_ms[0].shape\n image_latents_ms = [F.interpolate(image_latents, (h_, w_)).view(n, nv, c_l, h_, w_) for image_latents in image_latents_ms]\n\n if torch.any(torch.isnan(torch.stack(image_latents_ms))):\n self.encoder(images_encoder.view(n * nv, c, h, w))\n # raise Exception(\"NaN in encoded features.\")\n\n self.grid_f_features = image_latents_ms\n self.grid_f_Ks = Ks_encoder\n self.grid_f_poses_w2c = poses_w2c_encoder\n self.grid_f_combine = comb_encoder\n\n self.grid_c_imgs = images_render\n self.grid_c_Ks = Ks_render\n self.grid_c_poses_w2c = poses_w2c_render\n self.grid_c_combine = comb_render\n\n def sample_features(self, xyz, use_single_featuremap=True):\n n, n_pts, _ = xyz.shape\n n, nv, c, h, w = self.grid_f_features[self._scale].shape\n\n # if use_single_featuremap:\n # nv = 1\n\n xyz = xyz.unsqueeze(1) # (n, 1, pts, 3)\n ones = torch.ones_like(xyz[..., :1])\n xyz = torch.cat((xyz, ones), dim=-1)\n xyz_projected = ((self.grid_f_poses_w2c[:, :nv, :3, :]) @ xyz.permute(0, 1, 3, 2))\n distance = torch.norm(xyz_projected, dim=-2).unsqueeze(-1)\n xyz_projected = (self.grid_f_Ks[:, :nv] @ xyz_projected).permute(0, 1, 3, 2)\n xy = xyz_projected[:, :, :, [0, 1]]\n z = xyz_projected[:, :, :, 2:3]\n\n xy = xy / z.clamp_min(EPS)\n invalid = (z <= EPS) | (xy[:, :, :, :1] < -1) | (xy[:, 
:, :, :1] > 1) | (xy[:, :, :, 1:2] < -1) | (xy[:, :, :, 1:2] > 1)\n\n if self.code_mode == \"z\":\n # Get z into [-1, 1] range\n if self.inv_z:\n z = (1 / z.clamp_min(EPS) - 1 / self.d_max) / (1 / self.d_min - 1 / self.d_max)\n else:\n z = (z - self.d_min) / (self.d_max - self.d_min)\n z = 2 * z - 1\n xyz_projected = torch.cat((xy, z), dim=-1)\n elif self.code_mode == \"distance\":\n if self.inv_z:\n distance = (1 / distance.clamp_min(EPS) - 1 / self.d_max) / (1 / self.d_min - 1 / self.d_max)\n else:\n distance = (distance - self.d_min) / (self.d_max - self.d_min)\n distance = 2 * distance - 1\n xyz_projected = torch.cat((xy, distance), dim=-1)\n xyz_code = self.code_xyz(xyz_projected.view(n * nv * n_pts, -1)).view(n, nv, n_pts, -1)\n\n feature_map = self.grid_f_features[self._scale][:, :nv]\n # These samples are from different scales\n if self.learn_empty:\n empty_feature_expanded = self.empty_feature.view(1, 1, 1, c).expand(n, nv, n_pts, c)\n\n sampled_features = F.grid_sample(feature_map.view(n * nv, c, h, w), xy.view(n * nv, 1, -1, 2), mode=\"bilinear\", padding_mode=\"border\", align_corners=False).view(n, nv, c, n_pts).permute(0, 1, 3, 2)\n\n if self.learn_empty:\n sampled_features[invalid.expand(-1, -1, -1, c)] = empty_feature_expanded[invalid.expand(-1, -1, -1, c)]\n\n sampled_features = torch.cat((sampled_features, xyz_code), dim=-1)\n\n # If there are multiple frames with predictions, reduce them.\n # TODO: Technically, this implementations should be improved if we use multiple frames.\n # The reduction should only happen after we perform the unprojection.\n\n if self.grid_f_combine is not None:\n invalid_groups = []\n sampled_features_groups = []\n\n for group in self.grid_f_combine:\n if len(group) == 1:\n invalid_groups.append(invalid[:, group])\n sampled_features_groups.append(sampled_features[:, group])\n\n invalid_to_combine = invalid[:, group]\n features_to_combine = sampled_features[:, group]\n\n indices = torch.min(invalid_to_combine, dim=1, keepdim=True)[1]\n invalid_picked = torch.gather(invalid_to_combine, dim=1, index=indices)\n features_picked = torch.gather(features_to_combine, dim=1, index=indices.expand(-1, -1, -1, features_to_combine.shape[-1]))\n\n invalid_groups.append(invalid_picked)\n sampled_features_groups.append(features_picked)\n\n invalid = torch.cat(invalid_groups, dim=1)\n sampled_features = torch.cat(sampled_features_groups, dim=1)\n\n if use_single_featuremap:\n sampled_features = sampled_features.mean(dim=1)\n invalid = torch.any(invalid, dim=1)\n\n return sampled_features, invalid\n\n def sample_colors(self, xyz):\n n, n_pts, _ = xyz.shape\n n, nv, c, h, w = self.grid_c_imgs.shape\n xyz = xyz.unsqueeze(1) # (n, 1, pts, 3)\n ones = torch.ones_like(xyz[..., :1])\n xyz = torch.cat((xyz, ones), dim=-1)\n xyz_projected = ((self.grid_c_poses_w2c[:, :, :3, :]) @ xyz.permute(0, 1, 3, 2))\n distance = torch.norm(xyz_projected, dim=-2).unsqueeze(-1)\n xyz_projected = (self.grid_c_Ks @ xyz_projected).permute(0, 1, 3, 2)\n xy = xyz_projected[:, :, :, [0, 1]]\n z = xyz_projected[:, :, :, 2:3]\n\n # This scales the x-axis into the right range.\n xy = xy / z.clamp_min(EPS)\n invalid = (z <= EPS) | (xy[:, :, :, :1] < -1) | (xy[:, :, :, :1] > 1) | (xy[:, :, :, 1:2] < -1) | (xy[:, :, :, 1:2] > 1)\n\n sampled_colors = F.grid_sample(self.grid_c_imgs.view(n * nv, c, h, w), xy.view(n * nv, 1, -1, 2), mode=self.color_interpolation, padding_mode=\"border\", align_corners=False).view(n, nv, c, n_pts).permute(0, 1, 3, 2)\n assert not 
torch.any(torch.isnan(sampled_colors))\n\n if self.grid_c_combine is not None:\n invalid_groups = []\n sampled_colors_groups = []\n\n for group in self.grid_c_combine:\n if len(group) == 1:\n invalid_groups.append(invalid[:, group])\n sampled_colors_groups.append(sampled_colors[:, group])\n continue\n\n invalid_to_combine = invalid[:, group]\n colors_to_combine = sampled_colors[:, group]\n\n indices = torch.min(invalid_to_combine, dim=1, keepdim=True)[1]\n invalid_picked = torch.gather(invalid_to_combine, dim=1, index=indices)\n colors_picked = torch.gather(colors_to_combine, dim=1, index=indices.expand(-1, -1, -1, colors_to_combine.shape[-1]))\n\n invalid_groups.append(invalid_picked)\n sampled_colors_groups.append(colors_picked)\n\n invalid = torch.cat(invalid_groups, dim=1)\n sampled_colors = torch.cat(sampled_colors_groups, dim=1)\n\n if self.return_sample_depth:\n distance = distance.view(n, nv, n_pts, 1)\n sampled_colors = torch.cat((sampled_colors, distance), dim=-1)\n\n return sampled_colors, invalid\n\n def forward(self, xyz, coarse=True, viewdirs=None, far=False, only_density=False, predict_segmentation=False):\n \"\"\"\n Predict (r, g, b, sigma) at world space points xyz.\n Please call encode first!\n :param xyz (B, 3)\n B is batch of points (in rays)\n :param predict_segmentation, if true also return the segmentation distribution for all the points\n :return (B, 4) r g b sigma\n \"\"\"\n\n with profiler.record_function(\"model_inference\"):\n n, n_pts, _ = xyz.shape\n nv = self.grid_c_imgs.shape[1]\n\n if self.grid_c_combine is not None:\n nv = len(self.grid_c_combine)\n\n # Sampled features all has shape: scales [n, n_pts, c + xyz_code]\n sampled_features, invalid_features = self.sample_features(xyz, use_single_featuremap=not only_density) # invalid features (n, n_pts, 1)\n sampled_features = sampled_features.reshape(n * n_pts, -1)\n\n mlp_input = sampled_features.view(n, n_pts, -1)\n\n # Camera frustum culling stuff, currently disabled\n combine_index = None\n dim_size = None\n\n # Run main NeRF network\n if coarse or self.mlp_fine is None:\n mlp_output = self.mlp_coarse(\n mlp_input,\n combine_inner_dims=(n_pts,),\n combine_index=combine_index,\n dim_size=dim_size,\n )\n else:\n mlp_output = self.mlp_fine(\n mlp_input,\n combine_inner_dims=(n_pts,),\n combine_index=combine_index,\n dim_size=dim_size,\n )\n\n segs = None\n if predict_segmentation:\n segs = self.mlp_segmentation(mlp_input)\n # print(next(self.mlp_segmentation.parameters()))\n # softmax to get a class distribution\n segs = F.softmax(segs, dim=2)\n # (n, pts, c) -> (n, n_pts, c)\n mlp_output = mlp_output.reshape(n, n_pts, self._d_out)\n\n if self.sample_color:\n sigma = mlp_output[..., :1]\n sigma = F.softplus(sigma)\n rgb, invalid_colors = self.sample_colors(xyz) # (n, nv, pts, 3)\n else:\n sigma = mlp_output[..., :1]\n sigma = F.relu(sigma)\n rgb = mlp_output[..., 1:4].reshape(n, 1, n_pts, 3)\n rgb = F.sigmoid(rgb)\n invalid_colors = invalid_features.unsqueeze(-2)\n nv = 1\n\n if self.empty_empty:\n sigma[invalid_features[..., 0]] = 0\n # TODO: Think about this!\n # Since we don't train the colors directly, lets use softplus instead of relu\n\n if not only_density:\n _, _, _, c = rgb.shape\n rgb = rgb.permute(0, 2, 1, 3).reshape(n, n_pts, nv * c) # (n, pts, nv * 3)\n invalid_colors = invalid_colors.permute(0, 2, 1, 3).reshape(n, n_pts, nv)\n\n invalid = invalid_colors | invalid_features # Invalid features gets broadcasted to (n, n_pts, nv)\n invalid = invalid.to(rgb.dtype)\n else:\n rgb = torch.zeros((n, 
n_pts, nv * 3), device=sigma.device)\n invalid = invalid_features.to(sigma.dtype)\n\n if predict_segmentation:\n return rgb, invalid, sigma, segs\n else:\n return rgb, invalid, sigma" }, { "identifier": "ImageRaySampler", "path": "models/bts/model/ray_sampler.py", "snippet": "class ImageRaySampler(RaySampler):\n def __init__(self, z_near, z_far, height=None, width=None, channels=3, norm_dir=True):\n self.z_near = z_near\n self.z_far = z_far\n self.height = height\n self.width = width\n self.channels = channels\n self.norm_dir = norm_dir\n\n def sample(self, images, poses, projs, segs=None, sample_segs=False):\n n, v, _, _ = poses.shape\n\n if self.height is None:\n self.height, self.width = images.shape[-2:]\n\n all_rgb_gt = []\n all_rays = []\n all_segs_gt = []\n\n for n_ in range(n):\n focals = projs[n_, :, [0, 1], [0, 1]]\n centers = projs[n_, :, [0, 1], [2, 2]]\n\n rays = util.gen_rays(poses[n_].view(-1, 4, 4), self.width, self.height, focal=focals, c=centers, z_near=self.z_near, z_far=self.z_far, norm_dir=self.norm_dir).view(-1, 8)\n all_rays.append(rays)\n\n if images is not None:\n rgb_gt = images[n_].view(-1, self.channels, self.height, self.width)\n rgb_gt = (rgb_gt.permute(0, 2, 3, 1).contiguous().reshape(-1, self.channels))\n all_rgb_gt.append(rgb_gt)\n\n if sample_segs:\n segs_gt = segs[n_].view(-1, 1, self.height, self.width)\n segs_gt = (segs_gt.permute(0, 2, 3, 1).contiguous().reshape(-1, 1))\n all_segs_gt.append(segs_gt)\n\n all_rays = torch.stack(all_rays)\n if images is not None:\n all_rgb_gt = torch.stack(all_rgb_gt)\n else:\n all_rgb_gt = None\n\n if sample_segs:\n all_segs_gt = torch.stack(all_segs_gt)\n # the None accounts for the patch_to_image\n return all_rays, all_rgb_gt, all_segs_gt, None\n else:\n return all_rays, all_rgb_gt\n\n def reconstruct(self, render_dict, channels=None, reconstruct_segmentation=False):\n coarse = render_dict[\"coarse\"]\n fine = render_dict[\"fine\"]\n\n if channels is None:\n channels = self.channels\n\n if reconstruct_segmentation:\n c_segmentation = coarse[\"segs\"]\n # c_segmentation_raw = coarse[\"segs_raw\"]\n n_classes = c_segmentation.shape[-1]\n # n_samples = c_segmentation_raw.shape[-2]\n\n c_rgb = coarse[\"rgb\"] # n, n_pts, v * 3\n c_weights = coarse[\"weights\"]\n c_depth = coarse[\"depth\"]\n c_invalid = coarse[\"invalid\"]\n\n f_rgb = fine[\"rgb\"] # n, n_pts, v * 3\n f_weights = fine[\"weights\"]\n f_depth = fine[\"depth\"]\n f_invalid = fine[\"invalid\"]\n\n n, n_pts, v_c = c_rgb.shape\n v_in = n_pts // (self.height * self.width)\n v_render = v_c // channels\n c_n_smps = c_weights.shape[-1]\n f_n_smps = f_weights.shape[-1]\n # (This can be a different v from the sample method)\n\n if reconstruct_segmentation:\n coarse[\"segs\"] = c_segmentation.view(n, v_in, self.height, self.width, n_classes)\n # coarse[\"segs_raw\"] = c_segmentation_raw.view(n, v_in, self.height, self.width, n_samples, n_classes)\n\n coarse[\"rgb\"] = c_rgb.view(n, v_in, self.height, self.width, v_render, channels)\n coarse[\"weights\"] = c_weights.view(n, v_in, self.height, self.width, c_n_smps)\n coarse[\"depth\"] = c_depth.view(n, v_in, self.height, self.width)\n coarse[\"invalid\"] = c_invalid.view(n, v_in, self.height, self.width, c_n_smps, v_render)\n\n fine[\"rgb\"] = f_rgb.view(n, v_in, self.height, self.width, v_render, channels)\n fine[\"weights\"] = f_weights.view(n, v_in, self.height, self.width, f_n_smps)\n fine[\"depth\"] = f_depth.view(n, v_in, self.height, self.width)\n fine[\"invalid\"] = f_invalid.view(n, v_in, self.height, 
self.width, f_n_smps, v_render)\n\n if \"alphas\" in coarse:\n c_alphas = coarse[\"alphas\"]\n f_alphas = fine[\"alphas\"]\n coarse[\"alphas\"] = c_alphas.view(n, v_in, self.height, self.width, c_n_smps)\n fine[\"alphas\"] = f_alphas.view(n, v_in, self.height, self.width, f_n_smps)\n\n if \"z_samps\" in coarse:\n c_z_samps = coarse[\"z_samps\"]\n f_z_samps = fine[\"z_samps\"]\n coarse[\"z_samps\"] = c_z_samps.view(n, v_in, self.height, self.width, c_n_smps)\n fine[\"z_samps\"] = f_z_samps.view(n, v_in, self.height, self.width, f_n_smps)\n\n if \"rgb_samps\" in coarse:\n c_rgb_samps = coarse[\"rgb_samps\"]\n f_rgb_samps = fine[\"rgb_samps\"]\n coarse[\"rgb_samps\"] = c_rgb_samps.view(n, v_in, self.height, self.width, c_n_smps, v_render, channels)\n fine[\"rgb_samps\"] = f_rgb_samps.view(n, v_in, self.height, self.width, f_n_smps, v_render, channels)\n\n render_dict[\"coarse\"] = coarse\n render_dict[\"fine\"] = fine\n\n if \"rgb_gt\" in render_dict:\n rgb_gt = render_dict[\"rgb_gt\"]\n render_dict[\"rgb_gt\"] = rgb_gt.view(n, v_in, self.height, self.width, channels)\n\n return render_dict" }, { "identifier": "NeRFRenderer", "path": "models/common/render/nerf.py", "snippet": "class NeRFRenderer(torch.nn.Module):\n \"\"\"\n NeRF differentiable renderer\n :param n_coarse number of coarse (binned uniform) samples\n :param n_fine number of fine (importance) samples\n :param n_fine_depth number of expected depth samples\n :param noise_std noise to add to sigma. We do not use it\n :param depth_std noise for depth samples\n :param eval_batch_size ray batch size for evaluation\n :param white_bkgd if true, background color is white; else black\n :param lindisp if to use samples linear in disparity instead of distance\n :param sched ray sampling schedule. list containing 3 lists of equal length.\n sched[0] is list of iteration numbers,\n sched[1] is list of coarse sample numbers,\n sched[2] is list of fine sample numbers\n \"\"\"\n\n def __init__(\n self,\n n_coarse=128,\n n_fine=0,\n n_fine_depth=0,\n noise_std=0.0,\n depth_std=0.01,\n eval_batch_size=100000,\n white_bkgd=False,\n lindisp=False,\n sched=None, # ray sampling schedule for coarse and fine rays\n hard_alpha_cap=False\n ):\n super().__init__()\n self.n_coarse = n_coarse\n self.n_fine = n_fine\n self.n_fine_depth = n_fine_depth\n\n self.noise_std = noise_std\n self.depth_std = depth_std\n\n self.eval_batch_size = eval_batch_size\n self.white_bkgd = white_bkgd\n self.lindisp = lindisp\n if lindisp:\n print(\"Using linear displacement rays\")\n self.using_fine = n_fine > 0\n self.sched = sched\n if sched is not None and len(sched) == 0:\n self.sched = None\n self.register_buffer(\n \"iter_idx\", torch.tensor(0, dtype=torch.long), persistent=True\n )\n self.register_buffer(\n \"last_sched\", torch.tensor(0, dtype=torch.long), persistent=True\n )\n self.hard_alpha_cap = hard_alpha_cap\n\n def sample_coarse(self, rays):\n \"\"\"\n Stratified sampling. 
Note this is different from original NeRF slightly.\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :return (B, Kc)\n \"\"\"\n device = rays.device\n near, far = rays[:, -2:-1], rays[:, -1:] # (B, 1)\n\n step = 1.0 / self.n_coarse\n B = rays.shape[0]\n z_steps = torch.linspace(0, 1 - step, self.n_coarse, device=device) # (Kc)\n z_steps = z_steps.unsqueeze(0).repeat(B, 1) # (B, Kc)\n z_steps += torch.rand_like(z_steps) * step\n if not self.lindisp: # Use linear sampling in depth space\n return near * (1 - z_steps) + far * z_steps # (B, Kf)\n else: # Use linear sampling in disparity space\n return 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps) # (B, Kf)\n\n # Use linear sampling in depth space\n return near * (1 - z_steps) + far * z_steps # (B, Kc)\n\n def sample_coarse_from_dist(self, rays, weights, z_samp):\n device = rays.device\n B = rays.shape[0]\n\n num_bins = weights.shape[-1]\n num_samples = self.n_coarse\n\n weights = weights.detach() + 1e-5 # Prevent division by zero\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (B, Kc)\n cdf = torch.cumsum(pdf, -1) # (B, Kc)\n cdf = torch.cat([torch.zeros_like(cdf[:, :1]), cdf], -1) # (B, Kc+1)\n\n u = torch.rand(B, num_samples, dtype=torch.float32, device=device) # (B, Kf)\n interval_ids = torch.searchsorted(cdf, u, right=True) - 1 # (B, Kf)\n interval_ids = torch.clamp(interval_ids, 0, num_samples-1)\n interval_interp = torch.rand_like(interval_ids, dtype=torch.float32)\n\n # z_samps describe the centers of the respective histogram bins. Therefore, we have to extend them to the left and right\n if self.lindisp:\n z_samp = 1 / z_samp\n\n centers = .5 * (z_samp[:, 1:] + z_samp[:, :-1])\n interval_borders = torch.cat((z_samp[:, :1], centers, z_samp[:, -1:]), dim=-1)\n\n left_border = torch.gather(interval_borders, dim=-1, index=interval_ids)\n right_border = torch.gather(interval_borders, dim=-1, index=interval_ids+1)\n\n z_samp_new = left_border * (1 - interval_interp) + right_border * interval_interp\n\n if self.lindisp:\n z_samp_new = 1 / z_samp_new\n\n assert not torch.any(torch.isnan(z_samp_new))\n\n return z_samp_new\n\n def sample_fine(self, rays, weights):\n \"\"\"min\n Weighted stratified (importance) sample\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param weights (B, Kc)\n :return (B, Kf-Kfd)\n \"\"\"\n device = rays.device\n B = rays.shape[0]\n\n weights = weights.detach() + 1e-5 # Prevent division by zero\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (B, Kc)\n cdf = torch.cumsum(pdf, -1) # (B, Kc)\n cdf = torch.cat([torch.zeros_like(cdf[:, :1]), cdf], -1) # (B, Kc+1)\n\n u = torch.rand(\n B, self.n_fine - self.n_fine_depth, dtype=torch.float32, device=device\n ) # (B, Kf)\n inds = torch.searchsorted(cdf, u, right=True).float() - 1.0 # (B, Kf)\n inds = torch.clamp_min(inds, 0.0)\n\n z_steps = (inds + torch.rand_like(inds)) / self.n_coarse # (B, Kf)\n\n near, far = rays[:, -2:-1], rays[:, -1:] # (B, 1)\n if not self.lindisp: # Use linear sampling in depth space\n z_samp = near * (1 - z_steps) + far * z_steps # (B, Kf)\n else: # Use linear sampling in disparity space\n z_samp = 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps) # (B, Kf)\n\n assert not torch.any(torch.isnan(z_samp))\n\n return z_samp\n\n def sample_fine_depth(self, rays, depth):\n \"\"\"\n Sample around specified depth\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param depth (B)\n :return (B, Kfd)\n \"\"\"\n z_samp = depth.unsqueeze(1).repeat((1, 
self.n_fine_depth))\n z_samp += torch.randn_like(z_samp) * self.depth_std\n # Clamp does not support tensor bounds\n z_samp = torch.max(torch.min(z_samp, rays[:, -1:]), rays[:, -2:-1])\n\n assert not torch.any(torch.isnan(z_samp))\n\n return z_samp\n\n def composite(self, model, rays, z_samp, coarse=True, sb=0, predict_segmentation=False):\n \"\"\"\n Render RGB and depth for each ray using NeRF alpha-compositing formula,\n given sampled positions along each ray (see sample_*)\n :param model should return (B, (r, g, b, sigma)) when called with (B, (x, y, z))\n should also support 'coarse' boolean argument\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param z_samp z positions sampled for each ray (B, K)\n :param coarse whether to evaluate using coarse NeRF\n :param predict_segmentation if true also predict the semantic distribution\n :param sb super-batch dimension; 0 = disable\n :return weights (B, K), rgb (B, 3), depth (B)\n \"\"\"\n with profiler.record_function(\"renderer_composite\"):\n B, K = z_samp.shape\n\n deltas = z_samp[:, 1:] - z_samp[:, :-1] # (B, K-1)\n delta_inf = 1e10 * torch.ones_like(deltas[:, :1]) # infty (B, 1)\n # delta_inf = rays[:, -1:] - z_samp[:, -1:]\n deltas = torch.cat([deltas, delta_inf], -1) # (B, K)\n\n # (B, K, 3)\n points = rays[:, None, :3] + z_samp.unsqueeze(2) * rays[:, None, 3:6]\n points = points.reshape(-1, 3) # (B*K, 3)\n\n use_viewdirs = hasattr(model, \"use_viewdirs\") and model.use_viewdirs\n\n rgbs_all, invalid_all, sigmas_all, segs_all = [], [], [], []\n if sb > 0:\n points = points.reshape(\n sb, -1, 3\n ) # (SB, B'*K, 3) B' is real ray batch size\n eval_batch_size = (self.eval_batch_size - 1) // sb + 1\n eval_batch_dim = 1\n else:\n eval_batch_size = self.eval_batch_size\n eval_batch_dim = 0\n\n split_points = torch.split(points, eval_batch_size, dim=eval_batch_dim)\n if use_viewdirs:\n dim1 = K\n viewdirs = rays[:, None, 3:6].expand(-1, dim1, -1) # (B, K, 3)\n if sb > 0:\n viewdirs = viewdirs.reshape(sb, -1, 3) # (SB, B'*K, 3)\n else:\n viewdirs = viewdirs.reshape(-1, 3) # (B*K, 3)\n split_viewdirs = torch.split(\n viewdirs, eval_batch_size, dim=eval_batch_dim\n )\n for pnts, dirs in zip(split_points, split_viewdirs):\n rgbs, invalid, sigmas = model(pnts, coarse=coarse, viewdirs=dirs)\n rgbs_all.append(rgbs)\n invalid_all.append(invalid)\n sigmas_all.append(sigmas)\n else:\n for pnts in split_points:\n if predict_segmentation:\n rgbs, invalid, sigmas, segs = model(pnts, coarse=coarse,\n predict_segmentation=predict_segmentation)\n segs_all.append(segs)\n else:\n rgbs, invalid, sigmas = model(pnts, coarse=coarse,\n predict_segmentation=predict_segmentation)\n rgbs_all.append(rgbs)\n invalid_all.append(invalid)\n sigmas_all.append(sigmas)\n points = None\n viewdirs = None\n # (B*K, 4) OR (SB, B'*K, 4)\n rgbs = torch.cat(rgbs_all, dim=eval_batch_dim)\n invalid = torch.cat(invalid_all, dim=eval_batch_dim)\n sigmas = torch.cat(sigmas_all, dim=eval_batch_dim)\n\n if predict_segmentation:\n segs = torch.cat(segs_all, dim=eval_batch_dim)\n segs = segs.reshape(B, K, -1) # (B, K, n_classes)\n\n rgbs = rgbs.reshape(B, K, -1) # (B, K, 4 or 5)\n invalid = invalid.reshape(B, K, -1)\n sigmas = sigmas.reshape(B, K)\n\n if self.training and self.noise_std > 0.0:\n sigmas = sigmas + torch.randn_like(sigmas) * self.noise_std\n\n alphas = 1 - torch.exp(-deltas.abs() * torch.relu(sigmas)) # (B, K) (delta should be positive anyways)\n\n if self.hard_alpha_cap:\n alphas[:, -1] = 1\n\n deltas = None\n sigmas = None\n alphas_shifted = 
torch.cat(\n [torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1\n ) # (B, K+1) = [1, a1, a2, ...]\n T = torch.cumprod(alphas_shifted, -1) # (B)\n weights = alphas * T[:, :-1] # (B, K)\n # alphas = None\n alphas_shifted = None\n\n rgb_final = torch.sum(weights.unsqueeze(-1) * rgbs, -2) # (B, 3)\n depth_final = torch.sum(weights * z_samp, -1) # (B)\n\n\n\n if self.white_bkgd:\n # White background\n pix_alpha = weights.sum(dim=1) # (B), pixel alpha\n rgb_final = rgb_final + 1 - pix_alpha.unsqueeze(-1) # (B, 3)\n\n if predict_segmentation:\n segs_final = torch.sum(weights.unsqueeze(-1) * segs, dim=-2) # (B, n_classes)\n return (\n weights,\n rgb_final,\n depth_final,\n alphas,\n invalid,\n z_samp,\n rgbs,\n # segs,\n segs_final\n )\n else:\n return (\n weights,\n rgb_final,\n depth_final,\n alphas,\n invalid,\n z_samp,\n rgbs\n )\n\n def forward(\n self, model, rays, want_weights=False, want_alphas=False, want_z_samps=False, want_rgb_samps=False, predict_segmentation=False, sample_from_dist=None):\n \"\"\"\n :model nerf model, should return (SB, B, (r, g, b, sigma))\n when called with (SB, B, (x, y, z)), for multi-object:\n SB = 'super-batch' = size of object batch,\n B = size of per-object ray batch.\n Should also support 'coarse' boolean argument for coarse NeRF.\n :param rays ray spec [origins (3), directions (3), near (1), far (1)] (SB, B, 8)\n :param want_weights if true, returns compositing weights (SB, B, K)\n :param predict_segmentation if true, return the segmentation class distribution for each pixel\n :return render dict\n \"\"\"\n with profiler.record_function(\"renderer_forward\"):\n if self.sched is not None and self.last_sched.item() > 0:\n self.n_coarse = self.sched[1][self.last_sched.item() - 1]\n self.n_fine = self.sched[2][self.last_sched.item() - 1]\n\n assert len(rays.shape) == 3\n superbatch_size = rays.shape[0]\n rays = rays.reshape(-1, 8) # (SB * B, 8)\n\n if sample_from_dist is None:\n z_coarse = self.sample_coarse(rays) # (B, Kc)\n else:\n prop_weights, prop_z_samp = sample_from_dist\n n_samples = prop_weights.shape[-1]\n prop_weights = prop_weights.reshape(-1, n_samples)\n prop_z_samp = prop_z_samp.reshape(-1, n_samples)\n z_coarse = self.sample_coarse_from_dist(rays, prop_weights, prop_z_samp)\n z_coarse, _ = torch.sort(z_coarse, dim=-1)\n\n coarse_composite = self.composite(\n model, rays, z_coarse, coarse=True, sb=superbatch_size, predict_segmentation=predict_segmentation\n )\n\n outputs = DotMap(\n coarse=self._format_outputs(\n coarse_composite, superbatch_size, want_weights=want_weights, want_alphas=want_alphas,\n want_z_samps=want_z_samps, want_rgb_samps=want_rgb_samps, want_segmentation=predict_segmentation\n ),\n )\n\n if self.using_fine:\n all_samps = [z_coarse]\n if self.n_fine - self.n_fine_depth > 0:\n all_samps.append(\n self.sample_fine(rays, coarse_composite[0].detach())\n ) # (B, Kf - Kfd)\n if self.n_fine_depth > 0:\n all_samps.append(\n self.sample_fine_depth(rays, coarse_composite[2])\n ) # (B, Kfd)\n z_combine = torch.cat(all_samps, dim=-1) # (B, Kc + Kf)\n z_combine_sorted, argsort = torch.sort(z_combine, dim=-1)\n fine_composite = self.composite(\n model, rays, z_combine_sorted, coarse=False, sb=superbatch_size,\n )\n outputs.fine = self._format_outputs(\n fine_composite, superbatch_size, want_weights=want_weights, want_alphas=want_alphas, want_z_samps=want_z_samps, want_rgb_samps=want_rgb_samps\n )\n\n return outputs\n\n def _format_outputs(\n self, rendered_outputs, superbatch_size, want_weights=False, want_alphas=False, 
want_z_samps=False, want_rgb_samps=False, want_segmentation=False\n ):\n if want_segmentation:\n weights, rgb_final, depth, alphas, invalid, z_samps, rgb_samps, segs_final = rendered_outputs\n else:\n weights, rgb_final, depth, alphas, invalid, z_samps, rgb_samps = rendered_outputs\n\n n_smps = weights.shape[-1]\n out_d_rgb = rgb_final.shape[-1]\n out_d_i = invalid.shape[-1]\n\n if superbatch_size > 0:\n rgb_final = rgb_final.reshape(superbatch_size, -1, out_d_rgb)\n depth = depth.reshape(superbatch_size, -1)\n weights = weights.reshape(superbatch_size, -1, n_smps)\n alphas = alphas.reshape(superbatch_size, -1, n_smps)\n invalid = invalid.reshape(superbatch_size, -1, n_smps, out_d_i)\n z_samps = z_samps.reshape(superbatch_size, -1, n_smps)\n rgb_samps = rgb_samps.reshape(superbatch_size, -1, n_smps, out_d_rgb)\n\n if want_segmentation:\n out_segs = segs_final.shape[-1]\n segs_final = segs_final.reshape(superbatch_size, -1, out_segs)\n\n ret_dict = DotMap(rgb=rgb_final, depth=depth, invalid=invalid)\n if want_weights:\n ret_dict.weights = weights\n if want_alphas:\n ret_dict.alphas = alphas\n if want_z_samps:\n ret_dict.z_samps = z_samps\n if want_rgb_samps:\n ret_dict.rgb_samps = rgb_samps\n if want_segmentation:\n ret_dict.segs = segs_final\n # ret_dict.segs_raw = segs_raw\n return ret_dict\n\n def sched_step(self, steps=1):\n \"\"\"\n Called each training iteration to update sample numbers\n according to schedule\n \"\"\"\n if self.sched is None:\n return\n self.iter_idx += steps\n while (\n self.last_sched.item() < len(self.sched[0])\n and self.iter_idx.item() >= self.sched[0][self.last_sched.item()]\n ):\n self.n_coarse = self.sched[1][self.last_sched.item()]\n self.n_fine = self.sched[2][self.last_sched.item()]\n print(\n \"INFO: NeRF sampling resolution changed on schedule ==> c\",\n self.n_coarse,\n \"f\",\n self.n_fine,\n )\n self.last_sched += 1\n\n @classmethod\n def from_conf(cls, conf, white_bkgd=False, eval_batch_size=100000):\n return cls(\n conf.get(\"n_coarse\", 128),\n conf.get(\"n_fine\", 0),\n n_fine_depth=conf.get(\"n_fine_depth\", 0),\n noise_std=conf.get(\"noise_std\", 0.0),\n depth_std=conf.get(\"depth_std\", 0.01),\n white_bkgd=conf.get(\"white_bkgd\", white_bkgd),\n lindisp=conf.get(\"lindisp\", True),\n eval_batch_size=conf.get(\"eval_batch_size\", eval_batch_size),\n sched=conf.get(\"sched\", None),\n hard_alpha_cap=conf.get(\"hard_alpha_cap\", False)\n )\n\n def bind_parallel(self, net, gpus=None, simple_output=False):\n \"\"\"\n Returns a wrapper module compatible with DataParallel.\n Specifically, it renders rays with this renderer\n but always using the given network instance.\n Specify a list of GPU ids in 'gpus' to apply DataParallel automatically.\n :param net A PixelNeRF network\n :param gpus list of GPU ids to parallize to. If length is 1,\n does not parallelize\n :param simple_output only returns rendered (rgb, depth) instead of the \n full render output map. Saves data tranfer cost.\n :return torch module\n \"\"\"\n wrapped = _RenderWrapper(net, self, simple_output=simple_output)\n if gpus is not None and len(gpus) > 1:\n print(\"Using multi-GPU\", gpus)\n wrapped = torch.nn.DataParallel(wrapped, gpus, dim=1)\n return wrapped" } ]
import argparse
import sys
import matplotlib.pyplot as plt
import logging
import subprocess
import yaml
import cv2
import os
import numpy as np
import pickle
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from omegaconf import open_dict
from scripts.benchmarks.sscbench.generate_ply_sequence import get_cam_k
from scripts.benchmarks.sscbench.point_utils import read_calib, generate_point_grid, get_fov_mask
from scripts.voxel.gen_voxelgrid_npy import save_as_voxel_ply
from pathlib import Path
from tqdm import tqdm
from torch import nn
from hydra import compose, initialize
from models.bts.model import BTSNet, ImageRaySampler
from models.common.render import NeRFRenderer
from sscbench_dataset import SSCBenchDataset
from pathlib import Path
14839
device = f'cuda:0'  # DO NOT TOUCH OR YOU WILL BREAK RUNS (should be None)
gpu_id = None

if gpu_id is not None:
    print("GPU ID: " + str(gpu_id))
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

if torch.cuda.is_available():
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True

logging.basicConfig(level=logging.INFO)


def main():
    parser = argparse.ArgumentParser("SSCBenchmark Output generation")
    parser.add_argument("--sscbench_data_root", "-ssc", type=str)
    parser.add_argument("--voxel_gt_path", "-vgt", type=str)
    parser.add_argument("--resolution", "-r", default=(192, 640))
    parser.add_argument("--checkpoint", "-cp", type=str, required=True)
    parser.add_argument("--full", "-f", action="store_true")

    args = parser.parse_args()

    sscbench_data_root = args.sscbench_data_root
    voxel_gt_path = args.voxel_gt_path
    resolution = args.resolution
    cp_path = args.checkpoint
    full_evaluation = args.full

    if FULL_EVAL:
        full_evaluation = True

    logging.info("Setting up dataset")

    with open("label_maps.yaml", "r") as f:
        label_maps = yaml.safe_load(f)

    # pickle the dataset so we don't have to wait all the time
    if os.path.isfile("dataset.pkl") and not RELOAD_DATASET:
        logging.info("Loading dataset from dataset.pkl file.")
        with open("dataset.pkl", "rb") as f:
            dataset = pickle.load(f)
    else:
        logging.info("Generating the dataset and dumping it to dataset.pkl")
        dataset = SSCBenchDataset(
            data_path=sscbench_data_root,
            voxel_gt_path=voxel_gt_path,
            sequences=(9,),
            target_image_size=resolution,
            return_stereo=False,
            frame_count=1,
            color_aug=False,
        )
        if DATASET_LENGTH and not full_evaluation:
            dataset.length = DATASET_LENGTH
        with open("dataset.pkl", 'wb') as f:
            pickle.dump(dataset, f)

    logging.info("Setting up the model...")
    config_path = "exp_kitti_360"

    cp_path = Path(cp_path)
    cp_path = next(cp_path.glob("training*.pt"))

    initialize(version_base=None, config_path="../../../configs", job_name="gen_sscbench_outputs")
    config = compose(config_name=config_path, overrides=[])

    logging.info('Loading checkpoint')
    cp = torch.load(cp_path, map_location=device)

    with open_dict(config):
        config["renderer"]["hard_alpha_cap"] = True
        config["model_conf"]["code_mode"] = "z"
        # config["model_conf"]["z_near"] = 8
        config["model_conf"]["mlp_coarse"]["n_blocks"] = 0
        config["model_conf"]["mlp_coarse"]["d_hidden"] = 64
        config["model_conf"]["encoder"]["d_out"] = 64
        config["model_conf"]["encoder"]["type"] = "monodepth2"
        config["model_conf"]["grid_learn_empty"] = False
        config["model_conf"]["sample_color"] = True

        # stuff for segmentation
        config["model_conf"]["segmentation_mode"] = 'panoptic_deeplab'

    net = BTSNet(config["model_conf"])
    renderer = NeRFRenderer.from_conf(config["renderer"])
    renderer = renderer.bind_parallel(net, gpus=None).eval()
    renderer.renderer.n_coarse = 64
    renderer.renderer.lindisp = True

    class _Wrapper(nn.Module):
        def __init__(self):
            super().__init__()
            self.renderer = renderer

    _wrapper = _Wrapper()
    _wrapper.load_state_dict(cp["model"], strict=False)

    renderer.to(device)
    renderer.eval()

    logging.info("Loading the Lidar to Camera matrices...")
    calib = read_calib()
    T_velo_2_cam = calib["Tr"]

    logging.info("Generating the point cloud...")
    pts, _ = generate_point_grid(vox_origin=np.array([0, -25.6, -2]),
                                 scene_size=(51.2, 51.2, 6.4),
                                 voxel_size=VOXEL_SIZE,
                                 cam_E=T_velo_2_cam,
sys.path.append(".")
sys.path.extend("../../../")

RELOAD_DATASET = True
DATASET_LENGTH = 10

FULL_EVAL = True
SAMPLE_EVERY = None
SAMPLE_OFFSET = 2
# SAMPLE_RANGE = list(range(1000, 1600))
SAMPLE_RANGE = None

SIZE = 51.2  # Can be: 51.2, 25.6, 12.8
SIZES = (12.8, 25.6, 51.2)

VOXEL_SIZE = 0.1  # Needs: 0.2 % VOXEL_SIZE == 0

USE_ADDITIONAL_INVALIDS = True

TEST_ALPHA_CUTOFFS = False
SEARCH_VALUES = [10e-1, 10e-2, 10e-3, 10e-4, 10e-5, 10e-6, 10e-7]

SIGMA_CUTOFF = 0.1

USE_ALPHA_WEIGHTING = True
USE_GROW = True

CREATE_SIGMA_TRADEOFF_PLOT = True
SIGMA_VALUES = [1, 0.5, 0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.0025, 0.001]

PLOT_ALL_IMAGES = False

GENERATE_PLY_FILES = False
PLY_ONLY_FOV = True
PLY_IDS = list(range(1000, 1600))
PLY_PATH = Path("/storage/slurm/hayler/bts/voxel_outputs/seq1/s4c")
PLY_SIZES = [25.6, 51.2]

GENERATE_STATISTICS = False

if GENERATE_PLY_FILES:
    assert (not USE_GROW) and (not USE_ADDITIONAL_INVALIDS) and VOXEL_SIZE == 0.1

    # make the necessary dirs
    for size in PLY_SIZES:
        if not os.path.exists(PLY_PATH / str(int(size))):
            os.makedirs(PLY_PATH / str(int(size)))

# Setup of CUDA device and logging
os.system("nvidia-smi")

device = f'cuda:0'  # DO NOT TOUCH OR YOU WILL BREAK RUNS (should be None)
gpu_id = None

if gpu_id is not None:
    print("GPU ID: " + str(gpu_id))
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

if torch.cuda.is_available():
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True

logging.basicConfig(level=logging.INFO)


def main():
    parser = argparse.ArgumentParser("SSCBenchmark Output generation")
    parser.add_argument("--sscbench_data_root", "-ssc", type=str)
    parser.add_argument("--voxel_gt_path", "-vgt", type=str)
    parser.add_argument("--resolution", "-r", default=(192, 640))
    parser.add_argument("--checkpoint", "-cp", type=str, required=True)
    parser.add_argument("--full", "-f", action="store_true")

    args = parser.parse_args()

    sscbench_data_root = args.sscbench_data_root
    voxel_gt_path = args.voxel_gt_path
    resolution = args.resolution
    cp_path = args.checkpoint
    full_evaluation = args.full

    if FULL_EVAL:
        full_evaluation = True

    logging.info("Setting up dataset")

    with open("label_maps.yaml", "r") as f:
        label_maps = yaml.safe_load(f)

    # pickle the dataset so we don't have to wait all the time
    if os.path.isfile("dataset.pkl") and not RELOAD_DATASET:
        logging.info("Loading dataset from dataset.pkl file.")
        with open("dataset.pkl", "rb") as f:
            dataset = pickle.load(f)
    else:
        logging.info("Generating the dataset and dumping it to dataset.pkl")
        dataset = SSCBenchDataset(
            data_path=sscbench_data_root,
            voxel_gt_path=voxel_gt_path,
            sequences=(9,),
            target_image_size=resolution,
            return_stereo=False,
            frame_count=1,
            color_aug=False,
        )
        if DATASET_LENGTH and not full_evaluation:
            dataset.length = DATASET_LENGTH
        with open("dataset.pkl", 'wb') as f:
            pickle.dump(dataset, f)

    logging.info("Setting up the model...")
    config_path = "exp_kitti_360"

    cp_path = Path(cp_path)
    cp_path = next(cp_path.glob("training*.pt"))

    initialize(version_base=None, config_path="../../../configs", job_name="gen_sscbench_outputs")
    config = compose(config_name=config_path, overrides=[])

    logging.info('Loading checkpoint')
    cp = torch.load(cp_path, map_location=device)

    with open_dict(config):
        config["renderer"]["hard_alpha_cap"] = True
        config["model_conf"]["code_mode"] = "z"
        # config["model_conf"]["z_near"] = 8
        config["model_conf"]["mlp_coarse"]["n_blocks"] = 0
        config["model_conf"]["mlp_coarse"]["d_hidden"] = 64
        config["model_conf"]["encoder"]["d_out"] = 64
        config["model_conf"]["encoder"]["type"] = "monodepth2"
        config["model_conf"]["grid_learn_empty"] = False
        config["model_conf"]["sample_color"] = True

        # stuff for segmentation
        config["model_conf"]["segmentation_mode"] = 'panoptic_deeplab'

    net = BTSNet(config["model_conf"])
    renderer = NeRFRenderer.from_conf(config["renderer"])
    renderer = renderer.bind_parallel(net, gpus=None).eval()
    renderer.renderer.n_coarse = 64
    renderer.renderer.lindisp = True

    class _Wrapper(nn.Module):
        def __init__(self):
            super().__init__()
            self.renderer = renderer

    _wrapper = _Wrapper()
    _wrapper.load_state_dict(cp["model"], strict=False)

    renderer.to(device)
    renderer.eval()

    logging.info("Loading the Lidar to Camera matrices...")
    calib = read_calib()
    T_velo_2_cam = calib["Tr"]

    logging.info("Generating the point cloud...")
    pts, _ = generate_point_grid(vox_origin=np.array([0, -25.6, -2]),
                                 scene_size=(51.2, 51.2, 6.4),
                                 voxel_size=VOXEL_SIZE,
                                 cam_E=T_velo_2_cam,
cam_k=get_cam_k())
0
2023-11-12 21:53:27+00:00
24k
newcastleuniversity/DISPEL
dispel/processing/extract.py
[ { "identifier": "EntityType", "path": "dispel/data/core.py", "snippet": "class ReadingSchema:\nclass Evaluation(Epoch):\nclass Session(Epoch):\nclass Reading(FlagMixIn):\n def __init__(\n self,\n *args,\n uuid: str,\n finished: Optional[bool] = None,\n exit_reason: Optional[str] = None,\n user_id: Optional[str] = None,\n **kwargs,\n ):\n def to_dict(self):\n def __init__(\n self,\n *args,\n uuid: Optional[str] = None,\n evaluation_codes: Optional[Iterable[str]] = None,\n **kwargs,\n ):\n def __init__(\n self,\n evaluation: Evaluation,\n session: Optional[Session] = None,\n levels: Optional[Iterable[Level]] = None,\n measure_set: Optional[MeasureSet] = None,\n schema: Optional[ReadingSchema] = None,\n date: Any = None,\n device: Optional[Device] = None,\n ):\n def get_level(self, level_id: Optional[LevelIdType] = None) -> Level:\n def __repr__(self) -> str:\n def __iter__(self) -> Iterable[Tuple[LevelIdType, Level]]:\n def __len__(self) -> int:\n def empty(self) -> bool:\n def levels(self) -> ValuesView[Level]:\n def level_ids(self) -> List[LevelId]:\n def has_raw_data_set(\n self,\n data_set_id: str,\n level_id: LevelIdType,\n ) -> bool:\n def get_raw_data_set(\n self,\n data_set_id: str,\n level_id: LevelIdType,\n ) -> RawDataSet:\n def get_measure_set(self, level_id: Optional[LevelIdType] = None) -> MeasureSet:\n def get_merged_measure_set(self) -> MeasureSet:\n def set(self, value, **kwargs):\n def _get_level(self, level: Optional[Union[LevelIdType, Level]] = None) -> Level:\n def _measure_set(\n self,\n value: MeasureSet,\n level: Optional[Union[LevelIdType, Level]] = None,\n ):\n def _measure_value(\n self,\n value: MeasureValue,\n level: Optional[Union[LevelIdType, Level]] = None,\n epoch: Optional[LevelEpoch] = None,\n ):\n def _raw_data_set(\n self,\n value: RawDataSet,\n level: Union[LevelIdType, Level],\n concatenate: bool = False,\n overwrite: bool = False,\n ):\n def _epoch_measure_set(self, value: LevelEpoch, level: Union[LevelIdType, Level]):\n def _level(self, value: Level):\n def _set_flag(self, value: Flag):" }, { "identifier": "Flag", "path": "dispel/data/flags.py", "snippet": "class Flag:\n \"\"\"A class for entity flag.\"\"\"\n\n #: The flag identifier (string or id format)\n id_: InitVar[FlagIdType]\n\n #: The flag identifier\n id: FlagId = field(init=False)\n\n #: The detailed reason for the flag\n reason: str\n\n #: Stop processing\n stop_processing: bool = False\n\n def __post_init__(self, id_: FlagIdType):\n if isinstance(id_, str):\n self.id = FlagId.from_str(id_)\n elif isinstance(id_, FlagId):\n self.id = id_\n else:\n raise TypeError(\n \"Flag id should be either a convertible string id or an \"\n \"FlagId class.\"\n )\n\n def __hash__(self):\n return hash((self.id, self.reason, self.stop_processing))\n\n def format(self, *args, **kwargs) -> \"Flag\":\n \"\"\"Format an flag.\"\"\"\n return Flag(\n id_=self.id.format(*args, **kwargs),\n reason=self.reason.format(*args, **kwargs),\n stop_processing=self.stop_processing,\n )" }, { "identifier": "FlagSeverity", "path": "dispel/data/flags.py", "snippet": "class FlagSeverity(AVEnum):\n \"\"\"An enumeration for flag severity.\"\"\"\n\n DEVIATION = \"deviation\"\n INVALIDATION = \"invalidation\"" }, { "identifier": "FlagType", "path": "dispel/data/flags.py", "snippet": "class FlagType(AVEnum):\n \"\"\"An enumeration for flag types.\"\"\"\n\n TECHNICAL = \"technical\"\n BEHAVIORAL = \"behavioral\"" }, { "identifier": "WrappedResult", "path": "dispel/data/flags.py", "snippet": "class WrappedResult(FlagMixIn, 
Generic[WrappedResultType]):\n \"\"\"A wrapped result to carry potential flags.\n\n This class provides a convenient way to add flags to values from extract steps that\n are known to be invalid. This avoids having to write a separate flag step and is\n useful in cases where the information to flag a result is only accessible in the\n extract function.\n\n Parameters\n ----------\n measure_value\n The value of the measure returned by the extraction function.\n\n Attributes\n ----------\n measure_value\n The value of the measure returned by the extraction function.\n\n Examples\n --------\n Assuming we wanted to flag measures directly inside a custom extraction function\n based on some metrics calculated, one can do\n\n >>> from dispel.processing.extract import WrappedResult\n >>> from dispel.data.flags import Flag\n >>> from typing import Union\n >>> def custom_aggregation_func(data) -> Union[WrappedResult, float]:\n ... result = data.agg('mean')\n ... if len(data) < 3:\n ... inv = Flag(\n ... reason='Not enough data point',\n ... flag_severity=FlagSeverity.INVALIDATION\n ... )\n ... result = WrappedResult(result, inv)\n ... return result\n\n During processing, the class `ExtractStep` allows the transformation function to\n output ``WrappedResult`` objects. The extract step will automatically add any flags\n present in the ``WrappedResult`` object to the measure value. The ``WrappedResult``\n class supports basic operations with other scalars or ``WrappedResult`` object:\n\n >>> from dispel.processing.extract import WrappedResult\n >>> res1 = WrappedResult(measure_value=1)\n >>> res2 = WrappedResult(measure_value=2)\n >>> melted_res = res1 + res2\n >>> melted_res2 = res1 + 1\n \"\"\"\n\n def __init__(self, measure_value: WrappedResultType, *args, **kwargs):\n self.measure_value: WrappedResultType = measure_value\n super().__init__(*args, **kwargs)\n\n def _binary_operator(\n self,\n func: Callable[[WrappedResultType, WrappedResultType], WrappedResultType],\n other: Union[WrappedResultType, \"WrappedResult[WrappedResultType]\"],\n ) -> \"WrappedResult[WrappedResultType]\":\n \"\"\"Perform binary operation on values.\"\"\"\n # Get measure value for both WrappedResult and float object\n if is_wrapped := isinstance(other, WrappedResult):\n value_other = cast(WrappedResult, other).measure_value\n else:\n value_other = other\n\n # Create a new WrappedResult object with the combination\n res = WrappedResult(\n func(self.measure_value, value_other)\n ) # type: WrappedResult[WrappedResultType]\n\n # Inherit flag from current objet\n res.add_flags(self, ignore_duplicates=True)\n\n # If other is also wrapped, inherit his flag as well\n if is_wrapped:\n res.add_flags(cast(WrappedResult, other), True)\n\n return res\n\n def _unary_operation(\n self, func: Callable[[WrappedResultType], WrappedResultType]\n ) -> \"WrappedResult[WrappedResultType]\":\n res = WrappedResult(func(self.measure_value))\n res.add_flags(self)\n return res\n\n def __abs__(self):\n return self._unary_operation(operator.abs)\n\n def __add__(\n self, other: \"WrappedResult[WrappedResultType]\"\n ) -> \"WrappedResult[WrappedResultType]\":\n return self._binary_operator(operator.add, other)\n\n def __sub__(\n self, other: \"WrappedResult[WrappedResultType]\"\n ) -> \"WrappedResult[WrappedResultType]\":\n return self._binary_operator(operator.sub, other)\n\n def __mul__(\n self, other: \"WrappedResult[WrappedResultType]\"\n ) -> \"WrappedResult[WrappedResultType]\":\n return self._binary_operator(operator.mul, other)\n\n def 
__truediv__(\n self, other: \"WrappedResult[WrappedResultType]\"\n ) -> \"WrappedResult[WrappedResultType]\":\n return self._binary_operator(operator.truediv, other)" }, { "identifier": "Level", "path": "dispel/data/levels.py", "snippet": "class Level(Epoch):\n \"\"\"An entity to separate sub-task inside each test (Levels).\n\n FIXME: DOC\n\n Attributes\n ----------\n context\n Contextual information about the level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n\n Parameters\n ----------\n id_\n The identifier of a given Level.\n start\n The timestamp of the beginning of the level\n end\n The timestamp of the end of the level\n context\n Contextual information about the level\n raw_data_sets\n An iterable of :class:'~dispel.data.raw.RawDataSet' of a given Level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n epochs\n An iterable of :class:`~dispel.data.measures.EpochMeasureSet` to be added to the\n level.\n \"\"\"\n\n def __init__(\n self,\n id_: Union[str, List[str], LevelId],\n start: Any,\n end: Any,\n context: Optional[Context] = None,\n raw_data_sets: Optional[Iterable[RawDataSet]] = None,\n measure_set: Optional[MeasureSet] = None,\n epochs: Optional[Iterable[LevelEpoch]] = None,\n ):\n if not isinstance(id_, LevelId):\n id_ = LevelId(id_)\n\n definition = EpochDefinition(id_=id_)\n super().__init__(start=start, end=end, definition=definition)\n\n self.context = context or Context()\n self.measure_set = measure_set or MeasureSet()\n\n # create dictionary of raw data sets\n self._raw_data_sets: Dict[str, RawDataSet] = {}\n\n # set raw data sets if arg is provided\n if raw_data_sets:\n for raw_data_set in raw_data_sets:\n self.set(raw_data_set)\n\n # create data frame for each epoch\n self._epochs = pd.DataFrame(columns=[\"definition_id\", \"start\", \"end\", \"epoch\"])\n if epochs:\n for epoch in epochs:\n self.set(epoch)\n\n @property\n def id(self) -> LevelId:\n \"\"\"Get the ID of the level from its definition.\n\n Returns\n -------\n LevelId\n The ID of the definition provided via `definition`.\n \"\"\"\n assert self.definition is not None, \"Require definition to access id\"\n return cast(LevelId, self.definition.id)\n\n @id.setter\n def id(self, value: Union[str, DefinitionId]):\n \"\"\"Set the ID of the level's definition.\n\n Parameters\n ----------\n value\n The ID to be set.\n \"\"\"\n assert self.definition is not None, \"Require definition to set id\"\n self.definition.id = value # type: ignore\n\n def __hash__(self):\n return hash(self.id)\n\n def __repr__(self):\n return f\"<Level: {self.id} ({self.flag_count_repr})>\"\n\n @property\n def raw_data_sets(self) -> List[RawDataSet]:\n \"\"\"Get all raw data sets.\"\"\"\n return list(self._raw_data_sets.values())\n\n def has_raw_data_set(self, id_: str) -> bool:\n \"\"\"Return ``True`` if the level contains the desired raw data set.\"\"\"\n return id_ in self._raw_data_sets\n\n def get_raw_data_set(self, id_: str) -> RawDataSet:\n \"\"\"Get the raw data set for a given data set id.\n\n Parameters\n ----------\n id_\n The id of the raw data set to be returned\n\n Returns\n -------\n RawDataSet\n The raw data set with the matching id\n\n Raises\n ------\n ValueError\n If the given id does not correspond to any existing raw data set within the\n level.\n \"\"\"\n if id_ not in self._raw_data_sets:\n raise ValueError(\n f'Unknown data set with id: \"{id_}\" for level_id == \"{self.id}\" '\n f\"please provide an id within {list(self._raw_data_sets.keys())}\"\n )\n\n 
return self._raw_data_sets[id_]\n\n @property\n def epochs(self) -> List[LevelEpoch]:\n \"\"\"Get all epoch measure sets.\"\"\"\n return self._epochs[\"epoch\"].tolist()\n\n @singledispatchmethod\n def set(self, value, **kwargs):\n \"\"\"Set a value inside a level.\"\"\"\n raise TypeError(f\"Unsupported set type: {type(value)}\")\n\n @set.register(MeasureSet)\n def _set_measure_set(self, value: MeasureSet):\n self.measure_set += value\n\n @set.register(MeasureValue)\n def _set_measure_value(self, value: MeasureValue):\n self.measure_set.set(value)\n\n @set.register(RawDataSet)\n def _set_raw_data_set(\n self, value: RawDataSet, concatenate: bool = False, overwrite: bool = False\n ):\n if overwrite and concatenate:\n raise ValueError(\n \"You cannot both concatenate and overwrite an existing raw data set. \"\n \"Only one of these arguments must be set to ``True``.\"\n )\n\n if (id_ := value.id) in self._raw_data_sets: # pylint: disable=all\n if concatenate:\n value = value.concat(self.get_raw_data_set(id_))\n elif not overwrite:\n raise RawDataSetAlreadyExists(\n id_, self.id, \"Use overwrite=True to overwrite\"\n )\n\n self._raw_data_sets[id_] = value\n\n @set.register(LevelEpoch)\n def _set_epoch(self, value: LevelEpoch):\n new_index = len(self._epochs)\n self._epochs.loc[new_index] = pd.Series(\n dict(\n definition_id=value.id if value.definition else None,\n start=value.start,\n end=value.end,\n epoch=value,\n )\n )\n\n @set.register(Flag)\n def _set_flag(self, value: Flag):\n self.add_flag(value)" }, { "identifier": "MeasureId", "path": "dispel/data/measures.py", "snippet": "class MeasureId(DefinitionId):\n \"\"\"The definition of a measure id for a task.\n\n Parameters\n ----------\n task_name\n The name and abbreviation of the task. Note that if no abbreviation is provided\n the name is used directly in the id.\n measure_name\n The name of the measure and its abbreviation.\n modalities\n The modalities and their abbreviations under which the measure is constituted.\n aggregation\n A method that was used to aggregate a sequence of the underlying measure,\n e.g., for the measure ``mean response time`` it would be ``mean``.\n\n Notes\n -----\n The abbreviations of values are passed using\n :class:`~dispel.data.values.AbbreviatedValue`. To generate the actual id the `.abbr`\n accessor is used. If one passes only strings, the class actually wraps those into\n ``AbbreviatedValue`` instances.\n\n Examples\n --------\n >>> from dispel.data.values import AbbreviatedValue as AV\n >>> from dispel.data.measures import MeasureId\n >>> MeasureId(\n ... task_name=AV('Cognitive Processing Speed', 'CPS'),\n ... measure_name=AV('reaction time', 'rt'),\n ... modalities=[AV('digit-to-digit', 'dtd')],\n ... aggregation='mean'\n ... 
)\n cps-dtd-rt-mean\n \"\"\"\n\n def __init__(\n self,\n task_name: Union[str, AV],\n measure_name: Union[str, AV],\n modalities: Optional[List[Union[str, AV]]] = None,\n aggregation: Optional[Union[str, AV]] = None,\n ):\n self.task_name = AV.wrap(task_name)\n self.measure_name = AV.wrap(measure_name)\n self.modalities = None\n if modalities:\n self.modalities = list(map(AV.wrap, modalities))\n self.aggregation = AV.wrap(aggregation) if aggregation else None\n\n id_ = _join_not_none(\n \"-\",\n [\n self.task_name.abbr.lower(),\n \"_\".join(map(lambda x: x.abbr.lower(), self.modalities))\n if self.modalities\n else None,\n self.measure_name.abbr.lower(),\n self.aggregation.abbr.lower() if self.aggregation else None,\n ],\n )\n\n super().__init__(id_)\n\n @classmethod\n def from_str(cls, value: str) -> DefinitionId:\n \"\"\"See :meth:`dispel.data.values.DefinitionId.from_str`.\n\n Parameters\n ----------\n value\n The string from which the definition id is to be constructed.\n\n Raises\n ------\n NotImplementedError\n Always raised. This method is not implemented since there is no unambiguous\n parsing of task ids.\n \"\"\"\n raise NotImplementedError(\"Not unambiguous parsing of ids possible\")" }, { "identifier": "MeasureSet", "path": "dispel/data/measures.py", "snippet": "class MeasureSet(ValueSet):\n \"\"\"A collection of measures.\"\"\"\n\n VALUE_CLS: ClassVar[Type[Value]] = MeasureValue\n\n @classmethod\n def from_data_frame(cls, data: pd.DataFrame) -> \"MeasureSet\":\n \"\"\"Create a MeasureSet from a data frame.\n\n Parameters\n ----------\n data\n A data frame containing information about measures\n\n Returns\n -------\n MeasureSet\n A measure set derived from the provided data frame.\n \"\"\"\n return cls(data.apply(row_to_value, axis=1).to_list())\n\n def to_list(self, stringify: bool = False) -> List[Dict[str, Optional[Any]]]:\n \"\"\"Convert measure set to a list of measure dictionaries.\n\n Parameters\n ----------\n stringify\n ``True`` if all dictionary values are converted to strings. ``False``\n otherwise.\n\n Returns\n -------\n List[Dict[str, Optional[Any]]]\n A dictionary summarizing measure value information.\n \"\"\"\n return [\n cast(self.VALUE_CLS, measure).to_dict(stringify) # type: ignore\n for measure in self.values()\n ]" }, { "identifier": "MeasureValue", "path": "dispel/data/measures.py", "snippet": "class MeasureValue(FlagMixIn, Value):\n \"\"\"A measure value.\"\"\"\n\n def __repr__(self):\n return (\n f\"<MeasureValue ({self.definition}): {self.value} \"\n f\"({self.flag_count_repr})>\"\n )\n\n @staticmethod\n def _to_string(value):\n return \"\" if value is None else str(value)\n\n def to_dict(self, stringify: bool = False) -> Dict[str, Optional[Any]]:\n \"\"\"Get a dictionary representation of measure information.\n\n Parameters\n ----------\n stringify\n ``True`` if all dictionary values are converted to strings. 
``False``\n otherwise.\n\n Returns\n -------\n Dict[str, Optional[Any]]\n A dictionary summarizing measure value information.\n \"\"\"\n measure_min, measure_max = None, None\n if isinstance(self.definition.validator, RangeValidator):\n measure_min = self.definition.validator.lower_bound\n measure_max = self.definition.validator.upper_bound\n\n if stringify:\n value = str(self.value)\n measure_min = self._to_string(measure_min)\n measure_max = self._to_string(measure_max)\n else:\n value = self.value\n\n return dict(\n measure_id=str(self.id),\n measure_name=self.definition.name,\n measure_value=value,\n measure_unit=self.definition.unit,\n measure_type=self.definition.data_type,\n measure_min=measure_min,\n measure_max=measure_max,\n )" }, { "identifier": "MeasureValueDefinition", "path": "dispel/data/measures.py", "snippet": "class MeasureValueDefinition(ValueDefinition):\n \"\"\"The definition of measures from tasks.\n\n Parameters\n ----------\n task_name\n The full name of the task and its abbreviation, e.g., ``Cognitive Processing\n Speed test`` and ``CPS`` passed using\n :class:`~dispel.data.values.AbbreviatedValue`.\n measure_name\n The name of the measure, e.g. ``reaction time`` and its abbreviation passed\n using :class:`~dispel.data.values.AbbreviatedValue`. Note that aggregation\n methods are specified in ``aggregation`` and should not be direclty part of the\n measure name.\n unit\n See :class:`~dispel.data.values.ValueDefinition`.\n description\n See :class:`~dispel.data.values.ValueDefinition`.\n data_type\n See :class:`~dispel.data.values.ValueDefinition`.\n validator\n See :class:`~dispel.data.values.ValueDefinition`.\n modalities\n The modalities of the tasks, i.e. if there is more than one variant of the task.\n An example would be the ``digit-to-digit`` and ``symbol-to-digit`` or\n ``predefined key 1``, ``predefined key 2`` and ``random key`` variants of the\n CPS test. Abbreviations of the modalities can be passed using\n :class:`~dispel.data.values.AbbreviatedValue`.\n aggregation\n If the measure is the result of an aggregation, the method that was used to\n aggregate. E.g. for ``mean response time`` it would be ``mean``. Abbreviations\n are passed using :class:`~dispel.data.values.AbbreviatedValue`.\n precision\n See :class:`~dispel.data.values.ValueDefinition`.\n\n Examples\n --------\n >>> from dispel.data.values import AbbreviatedValue as AV\n >>> from dispel.data.measures import MeasureValueDefinition\n >>> from dispel.data.validators import RangeValidator\n >>> MeasureValueDefinition(\n ... task_name = AV('Cognitive Processing Speed test', 'CPS'),\n ... measure_name = AV('response time', 'rt'),\n ... unit = 's',\n ... description = 'The mean time to respond to a presented stimulus',\n ... data_type = 'float64',\n ... validator = RangeValidator(lower_bound=0),\n ... modalities = [\n ... AV('digit-to-digit', 'dtd'),\n ... AV('predefined key 1', 'key1')\n ... ],\n ... aggregation = 'mean'\n ... 
)\n <MeasureValueDefinition: cps-dtd_key1-rt-mean (CPS digit-to-digit ...>\n \"\"\"\n\n def __init__(\n self,\n task_name: Union[str, AV],\n measure_name: Union[str, AV],\n unit: Optional[str] = None,\n description: Optional[str] = None,\n data_type: Optional[str] = None,\n validator: Optional[Callable[[Any], None]] = None,\n modalities: Optional[List[Union[str, AV]]] = None,\n aggregation: Optional[Union[str, AV]] = None,\n precision: Optional[int] = None,\n ):\n self.task_name = AV.wrap(task_name)\n self.measure_name = AV.wrap(measure_name)\n self.modalities = None\n if modalities:\n self.modalities = list(map(AV.wrap, modalities))\n self.aggregation = AV.wrap(aggregation) if aggregation else None\n\n id_ = MeasureId(\n task_name=self.task_name,\n measure_name=self.measure_name,\n modalities=self.modalities,\n aggregation=aggregation,\n )\n\n name = _join_not_none(\n \" \",\n [\n self.task_name.abbr.upper(),\n \" \".join(map(str, self.modalities)) if self.modalities else None,\n self.aggregation if self.aggregation else None,\n self.measure_name,\n ],\n )\n\n super().__init__(\n id_=id_,\n name=name,\n unit=unit,\n description=description,\n data_type=data_type,\n validator=validator,\n precision=precision,\n )" }, { "identifier": "AbbreviatedValue", "path": "dispel/data/values.py", "snippet": "class AbbreviatedValue:\n \"\"\"An abbreviated value.\n\n Examples\n --------\n This class allows to consistently handle abbreviated terms. Assuming you have a name\n of an assessment, e.g. `Cognitive Processing Speed` test and the respective\n abbreviation would be `CPS`, then you can create an abbreviated value like this:\n\n >>> from dispel.data.values import AbbreviatedValue as AV\n >>> value = AV('Cognitive Processing Speed test', 'CPS')\n >>> value\n Cognitive Processing Speed test (CPS)\n\n While this seems like a lot of overhead, it comes in handy when describing value\n definitions or higher-level abstractions, such as measure definitions.\n\n Parameters\n ----------\n value\n The full description of the value\n abbr\n The abbreviated form of the value\n\n Attributes\n ----------\n value\n The full description of the value\n \"\"\"\n\n def __init__(self, value: str, abbr: Optional[str] = None):\n self.value = value\n self._abbr = abbr\n\n @property\n def abbr(self):\n \"\"\"Get the abbreviated form of the value.\"\"\"\n return self._abbr or self.value\n\n def __str__(self):\n return self.value\n\n def __repr__(self):\n if self._abbr:\n return f\"{self.value} ({self._abbr})\"\n return self.value\n\n def __hash__(self):\n return hash((self.value, self._abbr))\n\n def __eq__(self, other):\n if isinstance(other, str):\n return self._abbr is None and self.value == other\n if isinstance(other, AbbreviatedValue):\n return self.value == other.value and self.abbr == other.abbr\n return False\n\n def __lt__(self, other):\n if not isinstance(other, AbbreviatedValue):\n raise ValueError(f\"Unsupported type in comparison: {type(other)}\")\n if self.value == other.value:\n return self.abbr < other.abbr\n return self.value < other.value\n\n def format(self, *args, **kwargs):\n \"\"\"Format an abbreviated value.\"\"\"\n return AbbreviatedValue(\n self.value.format(*args, **kwargs),\n self._abbr.format(*args, **kwargs) if self._abbr else None,\n )\n\n @classmethod\n def wrap(cls, value):\n \"\"\"Wrap a value into an abbreviated value.\n\n This is a small helper class to conveniently wrap values into an abbreviated\n value, if they are not already one.\n\n Parameters\n ----------\n value\n The value to be 
wrapped\n\n Returns\n -------\n AbbreviatedValue\n The passed ``value`` if it is an instance of :class:`AbbreviatedValue`. If a\n string is passed, then the string is passed as ``value`` argument to the\n constructor.\n\n Raises\n ------\n ValueError\n If the passed value is neither a string nor an instance of\n :class:`AbbreviatedValue`.\n \"\"\"\n if isinstance(value, cls):\n return value\n if isinstance(value, str):\n return cls(value)\n\n raise ValueError(f\"Can only wrap string values. Got: {type(value)}\")" }, { "identifier": "DefinitionId", "path": "dispel/data/values.py", "snippet": "class AbbreviatedValue:\nclass DefinitionId:\nclass ValueDefinition:\nclass ValueDefinitionPrototype:\nclass Value:\nclass ValueSet:\nclass AVEnum(Enum):\n def __init__(self, value: str, abbr: Optional[str] = None):\n def abbr(self):\n def __str__(self):\n def __repr__(self):\n def __hash__(self):\n def __eq__(self, other):\n def __lt__(self, other):\n def format(self, *args, **kwargs):\n def wrap(cls, value):\n def __init__(self, id_: str):\n def id(self) -> str:\n def __str__(self):\n def __eq__(self, other):\n def __hash__(self):\n def from_str(cls, value: str) -> \"DefinitionId\":\n def __init__(\n self,\n id_: DefinitionIdType,\n name: str,\n unit: Optional[str] = None,\n description: Optional[str] = None,\n data_type: Optional[str] = None,\n validator: Optional[Callable[[Any], None]] = None,\n precision: Optional[int] = None,\n ):\n def __repr__(self):\n def __hash__(self):\n def __eq__(self, other):\n def _get_parameters(cls) -> Set[str]:\n def _to_dict(self) -> Dict[str, Any]:\n def _getattr(name):\n def derive(self, **kwargs) -> \"ValueDefinition\":\n def __init__(self, **kwargs):\n def create_definition(self, **values: Any) -> ValueDefinition:\n def _can_format(value):\n def create_definitions(\n self, items: Iterable[Dict[str, Any]]\n ) -> List[ValueDefinition]:\n def derive(self, **kwargs) -> \"ValueDefinitionPrototype\":\n def __init__(self, definition: ValueDefinition, value: Any):\n def id(self) -> DefinitionId:\n def __repr__(self):\n def __hash__(self):\n def __eq__(self, other):\n def __init__(\n self,\n values: Optional[List[Any]] = None,\n definitions: Optional[List[ValueDefinition]] = None,\n ):\n def set(\n self,\n value: Any,\n definition: Optional[ValueDefinition] = None,\n overwrite: bool = False,\n ):\n def set_values(\n self,\n values: List[Any],\n definitions: Optional[List[ValueDefinition]] = None,\n overwrite: bool = True,\n ):\n def has_value(self, id_: Union[DefinitionIdType, ValueDefinition]) -> bool:\n def __contains__(self, item: Union[DefinitionIdType, ValueDefinition]) -> bool:\n def get(self, id_: Union[DefinitionIdType, ValueDefinition]) -> Value:\n def __getitem__(self, key: Union[DefinitionIdType, ValueDefinition]) -> Value:\n def get_raw_value(self, id_: Union[DefinitionIdType, ValueDefinition]) -> Any:\n def get_definition(\n self, id_: Union[DefinitionIdType, ValueDefinition]\n ) -> ValueDefinition:\n def values(self) -> ValuesView[Value]:\n def ids(self) -> KeysView[DefinitionId]:\n def definitions(self) -> Iterable[ValueDefinition]:\n def __len__(self) -> int:\n def __iter__(self):\n def empty(self) -> bool:\n def _assert_add_type(other):\n def items(self) -> ItemsView[DefinitionId, Value]:\n def _combine(self, other, overwrite):\n def __add__(self, other):\n def __iadd__(self, other):\n def __or__(self, other: \"ValueSet\") -> \"ValueSet\":\n def __ior__(self, other: \"ValueSet\") -> \"ValueSet\":\n def __eq__(self, other):\n def __new__(cls, *_args, 
**_kwargs): # noqa: D102\n def __init__(self, value, abbr=None):\n def __repr__(self):\n def __str__(self):\n def __int__(self):\n def __lt__(self, other):\n def abbr(self):\n def variable(self):\n def from_abbr(cls, value: str):\n def from_variable(cls, value: str):\n VALUE_CLS: ClassVar[Type[Value]] = Value" }, { "identifier": "ErrorHandling", "path": "dispel/processing/core.py", "snippet": "class ProcessingError(Exception):\nclass StopProcessingError(ProcessingError):\nclass FlagError(ProcessingError):\nclass InvalidDataError(ProcessingError):\nclass ProcessingResultBase:\nclass ProcessingResult(ProcessingResultBase):\nclass ErrorHandling(Enum):\nclass ProcessingControlResult(ProcessingResultBase):\nclass Parameter(Generic[ParameterType]):\nclass ProcessingStep:\nclass CoreProcessingStepGroup(ProcessingStep):\nclass _ChainedProcesses(CoreProcessingStepGroup):\nclass FlagReadingStep(FlagStepMixin, ProcessingStep):\n def __init__(self, message: str, step: \"ProcessingStep\"):\n def __init__(self, flag: Flag, step: \"ProcessingStep\"):\n def get_kwargs(self) -> Dict[str, Any]:\n def get_kwargs(self) -> Dict[str, Any]:\n def get_sources(self) -> Iterable[SourcesType]:\n def should_raise(self) -> bool:\n def __bool__(self) -> bool:\n def from_bool(cls, stop_processing: bool) -> \"ErrorHandling\":\n def __post_init__(self):\n def get_targets(self) -> Iterable[EntityType]:\n def from_assertion_error(\n cls,\n step: \"ProcessingStep\",\n error: AssertionError,\n level: Optional[Level] = None,\n ):\n def from_flag(\n cls,\n flag: Flag,\n step: \"ProcessingStep\",\n targets: Iterable[EntityType],\n level: Optional[Level] = None,\n ):\n def __new__(cls, id_: str, *_args, **_kwargs):\n def __init__(\n self,\n id_: str,\n default_value: Optional[ParameterType] = None,\n validator: Optional[Callable[[Any], None]] = None,\n description: Optional[str] = None,\n ):\n def id(self):\n def value(self) -> ParameterType:\n def value(self, value: ParameterType):\n def has_parameter(cls, full_id: str) -> bool:\n def set_value(cls, full_id: str, value: Any):\n def __init__(self):\n def process(self, reading: Reading, **kwargs) -> ProcessResultType:\n def assert_valid_reading(self, reading: Reading, **kwargs):\n def flag_reading(self, reading: Reading, **kwargs) -> Generator[Flag, None, None]:\n def get_reading_flag_targets(\n self, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def set_previous(self, step: \"ProcessingStep\"):\n def set_next(self, step: \"ProcessingStep\"):\n def chain(self, successor: \"ProcessingStep\") -> \"ProcessingStep\":\n def __and__(self, other):\n def get_parameters(self) -> List[Tuple[str, Parameter]]:\n def __new__(cls, *args, **kwargs):\n def __init__(self, steps: Optional[List[ProcessingStep]] = None, **kwargs):\n def set_kwargs(self, **kwargs):\n def get_kwargs(self) -> Dict[str, Any]:\n def set_steps(self, steps: List[ProcessingStep]):\n def get_steps(self) -> List[ProcessingStep]:\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def chain(self, successor: \"ProcessingStep\") -> \"ProcessingStep\":\n def __init__(\n self,\n task_name: Optional[Union[AV, str]] = None,\n flag_name: Optional[Union[AV, str]] = None,\n flag_type: Optional[Union[FlagType, str]] = None,\n flag_severity: Optional[Union[FlagSeverity, str]] = None,\n reason: Optional[Union[AV, str]] = None,\n stop_processing: bool = False,\n flagging_function: Optional[Callable[..., bool]] = None,\n ):\n 
def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def get_reading_flag_targets(\n self, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def get_flag_targets(\n self, reading: Reading, level: Optional[Level] = None, **kwargs\n ) -> Iterable[EntityType]:\n def flag_reading(self, reading: Reading, **kwargs) -> Generator[Flag, None, None]:\n RAISE = \"raise\"\n IGNORE = \"ignore\"" }, { "identifier": "MutateDataSetProcessingStepBase", "path": "dispel/processing/data_set.py", "snippet": "class RawDataSetProcessingResult(LevelProcessingResult):\nclass StorageError(Enum):\nclass DataSetProcessingStepProtocol(metaclass=ABCMeta):\nclass DataSetProcessingStepMixin(\n TaskMixin,\n DataSetProcessingStepProtocol,\n LevelProcessingStepProtocol,\n metaclass=ABCMeta,\n):\nclass DataSetProcessingStep(\n DataSetProcessingStepMixin, LevelProcessingStep, metaclass=ABCMeta\n):\nclass MutateDataSetProcessingStepBase(DataSetProcessingStep, metaclass=ABCMeta):\nclass FlagDataSetStep(FlagStepMixin, DataSetProcessingStep, metaclass=ABCMeta):\n def __post_init__(self):\n def overwrite(self) -> bool:\n def concatenate(self) -> bool:\n def process_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> ProcessResultType:\n def get_data_set_ids(self) -> Iterable[str]:\n def get_raw_data_sets(self, level: Level) -> List[RawDataSet]:\n def get_data_frames(self, level: Level) -> List[pd.DataFrame]:\n def assert_valid_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ):\n def flag_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Generator[Flag, None, None]:\n def get_data_sets_flag_targets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Iterable[EntityType]:\n def __init__(self, *args, **kwargs):\n def get_data_set_ids(self) -> Iterable[str]:\n def get_raw_data_sets(self, level: Level) -> List[RawDataSet]:\n def get_data_frames(self, level: Level) -> List[pd.DataFrame]:\n def assert_valid_level(self, level: Level, reading: Reading, **kwargs):\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n level_filter: Optional[LevelFilterType] = None,\n ):\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def assert_valid_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ):\n def flag_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Generator[Flag, None, None]:\n def process_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> ProcessResultType:\ndef transformation(_func=None, **kwargs):\n def wrapper(func):\ndef decorated_processing_function(\n func: Callable[..., Any],\n data_sets: Sequence[pd.DataFrame],\n reading: Reading,\n level: Level,\n **kwargs,\n) -> Any:\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n transform_function: Optional[Callable[..., Any]] = None,\n level_filter: Optional[LevelFilterType] = None,\n ):\n def get_transform_function(self) -> Optional[Callable[..., Any]]:\n def get_transform_functions(self) -> TransformationFunctionGeneratorType:\n def wrap_result(\n self, res: Any, level: Level, reading: Reading, **kwargs: Any\n ) -> WrapResultGeneratorType:\n def 
process_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> ProcessResultType:\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n level_filter: Optional[LevelFilterType] = None,\n task_name: Optional[Union[AV, str]] = None,\n flag_name: Optional[Union[AV, str]] = None,\n flag_type: Optional[Union[FlagType, str]] = None,\n flag_severity: Optional[Union[FlagSeverity, str]] = None,\n reason: Optional[Union[AV, str]] = None,\n stop_processing: bool = False,\n flagging_function: Optional[Callable[..., bool]] = None,\n target_ids: Optional[Union[Iterable[str], str]] = None,\n ):\n def get_target_ids(self) -> Iterable[str]:\n def process_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> ProcessResultType:\n def get_data_sets_flag_targets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Iterable[EntityType]:\n def get_flag_targets(\n self, reading: Reading, level: Optional[Level] = None, **kwargs\n ) -> Iterable[EntityType]:\n def flag_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Generator[Flag, None, None]:\n RAISE = \"raise\"\n IGNORE = \"ignore\"\n OVERWRITE = \"overwrite\"\n CONCATENATE = \"concatenate\"" }, { "identifier": "FlagStepMixin", "path": "dispel/processing/flags.py", "snippet": "class FlagStepMixin(TaskMixin, metaclass=ABCMeta):\n \"\"\"A flag mix in class.\"\"\"\n\n #: The name of the flag\n flag_name: Union[AV, str]\n\n #: The type of the flag\n flag_type: Union[FlagType, str]\n\n # The severity of the flag\n flag_severity: Union[FlagSeverity, str]\n\n #: The detailed reason of the flag\n reason: str\n\n #: The stop_processing status of the flag step\n stop_processing: bool = False\n\n #: The flagging function\n flagging_function: Optional[Callable[..., bool]] = None\n\n def __init__(self, *args, **kwargs):\n kwargs = set_attributes_from_kwargs(\n self,\n \"task_name\",\n \"flag_name\",\n \"flag_type\",\n \"flag_severity\",\n \"reason\",\n \"stop_processing\",\n \"flagging_function\",\n **kwargs,\n )\n\n self.kwargs = kwargs\n super().__init__(*args, **kwargs)\n\n def get_flag_name(self, **kwargs) -> Union[str, AV]:\n \"\"\"Get the flag name.\"\"\"\n flag_name = kwargs.get(\"flag_name\", None) or getattr(self, \"flag_name\")\n if isinstance(flag_name, (str, AV)):\n return flag_name.format(**kwargs)\n raise ValueError(\"Missing flag name.\")\n\n def get_flag_type(self, **kwargs) -> Union[str, FlagType]:\n \"\"\"Get the flag type.\"\"\"\n flag_type = kwargs.get(\"flag_type\", None) or getattr(self, \"flag_type\")\n if isinstance(flag_type, (str, FlagType)):\n return flag_type\n raise ValueError(\"Missing flag type.\")\n\n def get_flag_severity(self, **kwargs) -> Union[str, FlagSeverity]:\n \"\"\"Get the flag severity.\"\"\"\n flag_severity = kwargs.get(\"flag_severity\", None) or getattr(\n self, \"flag_severity\"\n )\n if isinstance(flag_severity, (str, FlagSeverity)):\n return flag_severity\n raise ValueError(\"Missing flag severity.\")\n\n def get_reason(self, **kwargs) -> str:\n \"\"\"Get the flag reason.\"\"\"\n reason = kwargs.get(\"reason\", None) or getattr(self, \"reason\")\n if isinstance(reason, str):\n return reason.format(**kwargs)\n raise ValueError(\"Missing flag reason.\")\n\n @abstractmethod\n def get_flag_targets(\n self, reading: Reading, level: Optional[Level] = None, **kwargs\n ) -> 
Iterable[EntityType]:\n \"\"\"Get flag targets.\n\n Parameters\n ----------\n reading\n The reading to which the targets are associated.\n level\n The level associated with the targets (if needed).\n kwargs\n Keyword arguments from which the flag targets are to be extracted.\n\n Returns\n -------\n Iterable[EntityType]\n An iterable of the flag targets.\n \"\"\"\n raise NotImplementedError\n\n def get_flagging_function(self) -> Optional[Callable[..., bool]]:\n \"\"\"Get the flagging function.\"\"\"\n # unbind bound methods\n func = self.flagging_function\n if func is not None and hasattr(func, \"__func__\"):\n return func.__func__ # type: ignore\n return func\n\n def get_flagging_functions(self) -> FlaggingFunctionGeneratorType:\n \"\"\"Get all flagging functions associated with this step.\"\"\"\n if func := self.get_flagging_function():\n yield func, {}\n\n members = inspect.getmembers(self, predicate=inspect.isroutine)\n for _, func in members:\n if func is not None and hasattr(func, \"__flagging_function__\"):\n yield func, func.__flag_kwargs__ # type: ignore\n\n def set_flag_kwargs(self, **kwargs):\n \"\"\"Set keyword arguments inside flagging function.\n\n Parameters\n ----------\n kwargs\n The keyword arguments to be added inside the flagging function\n keyword arguments.\n \"\"\"\n _, parent, *_ = inspect.stack()\n getattr(self, parent.function).__flag_kwargs__.update(kwargs)\n\n def get_flag(self, **kwargs) -> Flag:\n \"\"\"Get the flag corresponding to the flag step.\"\"\"\n (all_kwargs := self.kwargs.copy()).update(kwargs)\n return Flag(\n id_=FlagId(\n task_name=self.get_task_name(**all_kwargs),\n flag_name=self.get_flag_name(**all_kwargs),\n flag_type=self.get_flag_type(**all_kwargs),\n flag_severity=self.get_flag_severity(**all_kwargs),\n ),\n reason=self.get_reason(**all_kwargs),\n stop_processing=self.stop_processing,\n )" }, { "identifier": "LevelFilterType", "path": "dispel/processing/level.py", "snippet": "class LevelProcessingResultBase:\nclass LevelProcessingResult(ProcessingResult, LevelProcessingResultBase):\nclass LevelProcessingControlResult(ProcessingControlResult, LevelProcessingResultBase):\nclass LevelFilter(ABC):\nclass LevelIdFilter(LevelFilter):\nclass DefaultLevelFilter(LevelFilter):\nclass LevelProcessingStepProtocol(metaclass=ABCMeta):\nclass LevelFilterProcessingStepMixin:\nclass LevelProcessingStep(\n LevelProcessingStepProtocol, LevelFilterProcessingStepMixin, ProcessingStep\n):\nclass FlagLevelStep(FlagStepMixin, LevelProcessingStep):\nclass ProcessingStepGroup(LevelFilterProcessingStepMixin, CoreProcessingStepGroup):\n def __post_init__(self):\n def from_assertion_error(\n cls,\n step: \"ProcessingStep\",\n error: AssertionError,\n level: Optional[Level] = None,\n ):\n def from_flag(\n cls,\n flag: Flag,\n step: \"ProcessingStep\",\n targets: Iterable[EntityType],\n level: Optional[Level] = None,\n ):\ndef _intersection(a, b):\ndef _union(a, b):\n def __call__(self, levels: Iterable[Level]) -> Set[Level]:\n def __repr__(self) -> str:\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def _combined(\n self, other: \"LevelFilter\", func: Callable[[Set, Set], Set]\n ) -> \"LevelFilter\":\n def _match(levels: Iterable[Level]) -> Set[Level]:\n def _repr() -> str:\n def __and__(self, other: \"LevelFilter\") -> \"LevelFilter\":\n def __or__(self, other: \"LevelFilter\") -> \"LevelFilter\":\n def __invert__(self) -> \"LevelFilter\":\n def _inverted_filter(levels: Iterable[Level]) -> Set[Level]:\n def _repr() -> str:\n def 
__init__(self, level_ids: MultipleLevelIdsType):\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def assert_valid_level(self, level: Level, reading: Reading, **kwargs):\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def get_level_flag_targets(\n self, level: Level, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def __init__(self, *args, **kwargs):\n def get_level_filter(self) -> LevelFilter:\n def set_level_filter(self, level_filter: LevelFilterType):\n def inject_level_filter_from_step(self, step: \"LevelFilterProcessingStepMixin\"):\n def _get_level_filter(inner_self) -> LevelFilter:\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def assert_valid_level(self, level: Level, reading: Reading, **kwargs):\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def __init__(\n self,\n level_filter: Optional[LevelFilterType] = None,\n task_name: Optional[Union[AV, str]] = None,\n flag_name: Optional[Union[AV, str]] = None,\n flag_type: Optional[Union[FlagType, str]] = None,\n flag_severity: Optional[Union[FlagSeverity, str]] = None,\n reason: Optional[Union[AV, str]] = None,\n stop_processing: bool = False,\n flagging_function: Optional[Callable[..., bool]] = None,\n ):\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def get_level_flag_targets(\n self, level: Level, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def get_flag_targets(\n self, reading: Reading, level: Optional[Level] = None, **kwargs\n ) -> Iterable[EntityType]:\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def set_steps(self, steps: List[ProcessingStep]):\n def inject_level_filter_from_step(self, step: LevelFilterProcessingStepMixin):" }, { "identifier": "TransformStepChainMixIn", "path": "dispel/processing/transform.py", "snippet": "class TransformStepChainMixIn(DataSetProcessingStepProtocol, metaclass=ABCMeta):\n \"\"\"A mixin class that allows to chain transformation steps.\n\n The basic idea is to leverage the new data set ids from the previous transform step\n as the required data set ids for the current step. This avoids having to define the\n `data_set_ids` attribute.\n \"\"\"\n\n def get_data_set_ids(self) -> Iterable[str]:\n \"\"\"Get the data set ids to be processed.\n\n This uses the new data set ids from a previous transform step if set. 
Otherwise,\n falls back to the default behavior of returning the set data set ids from the\n constructor or class variable.\n\n Returns\n -------\n Iterable[str]\n An iterable of data set ids.\n \"\"\"\n assert isinstance(\n self, ProcessingStep\n ), \"TransformStepChainMixIn must inherit from ProcessingStep\"\n # pylint: disable=no-member\n if isinstance(self.predecessor, TransformStep):\n return [self.predecessor.get_new_data_set_id()]\n # pylint: enable=no-member\n\n return super().get_data_set_ids() # type: ignore[safe-super]" }, { "identifier": "iqr", "path": "dispel/stats/core.py", "snippet": "def mad(data: np.ndarray, axis=None):\ndef variation(\n data: pd.Series, error: Literal[\"raise\", \"coerce\", \"omit\"] = \"coerce\"\n) -> float:\ndef variation_increase(\n data: pd.Series, error: Literal[\"raise\", \"coerce\", \"omit\"] = \"coerce\"\n) -> float:\ndef q_factory(percentile: float, name: str) -> Callable[[pd.Series], float]:\ndef freq_nan(data: pd.Series) -> float:\ndef iqr(data: pd.Series) -> float:\ndef npcv(data: pd.Series) -> float:" } ]
import inspect import math import warnings import numpy as np import pandas as pd from typing import ( Any, Callable, Dict, Generator, Iterable, List, Optional, Sequence, Tuple, Union, cast, ) from deprecated import deprecated from dispel.data.core import EntityType, Reading from dispel.data.flags import Flag, FlagSeverity, FlagType, WrappedResult from dispel.data.levels import Level from dispel.data.measures import ( MeasureId, MeasureSet, MeasureValue, MeasureValueDefinition, ) from dispel.data.values import AbbreviatedValue as AV from dispel.data.values import ( DefinitionId, DefinitionIdType, ValueDefinition, ValueDefinitionPrototype, ) from dispel.processing.core import ( ErrorHandling, ProcessingControlResult, ProcessingResult, ProcessingStep, ProcessResultType, ) from dispel.processing.data_set import ( MutateDataSetProcessingStepBase, TransformationFunctionGeneratorType, WrapResultGeneratorType, ) from dispel.processing.flags import FlagStepMixin from dispel.processing.level import LevelFilterType, LevelProcessingResult from dispel.processing.transform import TransformStepChainMixIn from dispel.stats.core import iqr, npcv, percentile_95, variation, variation_increase
14,445
Parameters ---------- data_set_ids An optional list of data set ids to be used for the transformation. See :class:`~dispel.processing.data_set.DataSetProcessingStepMixin`. transform_function An optional function to be applied to the data sets. See :class:`~dispel.processing.data_set.MutateDataSetProcessingStepBase`. definition An optional value definition or prototype. See :class:`MeasureDefinitionMixin`. level_filter An optional filter to limit the levels being processed. See :class:`~dispel.processing.level.LevelProcessingStep`. yield_if_nan If ``True``, yield null values as measure values. Otherwise, processing will not return a measure value in case of a null result for the extraction. Examples -------- Assuming we wanted to compute the maximum value of a raw data set we can create the following step >>> from dispel.data.values import ValueDefinition >>> from dispel.processing.extract import ExtractStep >>> step = ExtractStep( ... 'data-set-id', ... lambda data: data.max(axis=0), ... ValueDefinition('maximum','Maximum value') ... ) A common approach is to define a processing step for re-use and leveraging the ``@transformation`` decorator to specify the transformation function: >>> import pandas as pd >>> from dispel.data.values import ValueDefinition >>> from dispel.processing.extract import ExtractStep >>> from dispel.processing.data_set import transformation >>> class MyExtractStep(ExtractStep): ... data_set_ids = 'data-set-id' ... definition = ValueDefinition('maximum','Maximum value') ... ... @transformation ... def _max(self, data: pd.DataFrame) -> float: ... return data.max(axis=0) Often one wants to extract multiple measures from one data set. This can be achieved by using prototypes and optional named arguments with ``@transformation``: >>> import pandas as pd >>> from dispel.data.values import ValueDefinitionPrototype >>> from dispel.processing.extract import ExtractStep >>> from dispel.processing.data_set import transformation >>> class MyExtractStep(ExtractStep): ... data_set_ids = 'data-set-id' ... definition = ValueDefinitionPrototype( ... id_='id-{agg_abbr}', ... name='{agg} value' ... ) ... ... @transformation(agg='Maximum', agg_abbr='max') ... def _max(self, data: pd.DataFrame) -> float: ... return data.max(axis=0) ... ... @transformation(agg='Minimum', agg_abbr='min') ... def _min(self, data: pd.DataFrame) -> float: ... return data.min(axis=0) """ yield_if_nan: bool = False def __init__( self, data_set_ids: Optional[Union[str, Iterable[str]]] = None, transform_function: Optional[Callable[..., Any]] = None, definition: Optional[Union[ValueDefinition, ValueDefinitionPrototype]] = None, level_filter: Optional[LevelFilterType] = None, yield_if_nan: Optional[bool] = None, ): super().__init__( definition=definition, data_set_ids=data_set_ids, transform_function=transform_function, level_filter=level_filter, ) self.yield_if_nan = yield_if_nan or self.yield_if_nan def wrap_result( self, res: Any, level: Level, reading: Reading, **kwargs: Any ) -> WrapResultGeneratorType: """Wrap the result from the processing function into a class. Parameters ---------- res Any result returned by the extraction step. If res is a :class:`~dispel.data.flags.WrappedResult`, the flag contained in the object will be automatically added to the :class:`~dispel.data.measures.MeasureValue`, hence the flagged wrapped results will always translate into flagged :class:`~dispel.data.measures.MeasureValue`. 
level The current level reading The current reading kwargs Additional kwargs Yields ------ LevelProcessingResult The processing result """ try: if len(res) == 0: res = math.nan warnings.warn("Extract step returned an iterable!", UserWarning) except TypeError: pass
"""Extraction functionalities for processing module.""" from __future__ import annotations class MeasureDefinitionMixin: """A mixin class for processing steps producing measure values. Parameters ---------- definition An optional value definition. If no value definition is provided, the :data:`definition` class variable will be used. Alternatively, one can overwrite :meth:`get_definition` to provide the definition. """ #: The specification of the measure definition definition: Optional[Union[ValueDefinition, ValueDefinitionPrototype]] = None def __init__(self, *args, **kwargs): definition = kwargs.pop("definition", None) self.definition = definition or self.definition super().__init__(*args, **kwargs) def get_definition(self, **kwargs) -> ValueDefinition: """Get the measure definition. Parameters ---------- kwargs Optional parameters that will be passed along to the creation of measure definitions from prototypes. See :meth:`~dispel.data.values.ValueDefinitionPrototype.create_definition` Returns ------- ValueDefinition The definition of the value """ assert ( self.definition is not None ), "Definition must be set or get_definition must be overwritten." definition = self.definition if isinstance(definition, ValueDefinitionPrototype): definition = cast(ValueDefinition, definition.create_definition(**kwargs)) return definition def get_value(self, value: Any, **kwargs) -> MeasureValue: """Get a measure value based on the definition. Parameters ---------- value The value kwargs Optional arguments passed to :meth:`get_definition`. Returns ------- MeasureValue The ``value`` wrapped with the definition from :meth:`get_definition`. """ return MeasureValue(self.get_definition(**kwargs), value) class ExtractStep( MeasureDefinitionMixin, TransformStepChainMixIn, MutateDataSetProcessingStepBase ): r"""A measure extraction processing step. This class provides a convenient way to extract a measure from one or more data sets by specifying their id, their level_ids or level filter, a transformation function and a measure value definition. Parameters ---------- data_set_ids An optional list of data set ids to be used for the transformation. See :class:`~dispel.processing.data_set.DataSetProcessingStepMixin`. transform_function An optional function to be applied to the data sets. See :class:`~dispel.processing.data_set.MutateDataSetProcessingStepBase`. definition An optional value definition or prototype. See :class:`MeasureDefinitionMixin`. level_filter An optional filter to limit the levels being processed. See :class:`~dispel.processing.level.LevelProcessingStep`. yield_if_nan If ``True``, yield null values as measure values. Otherwise, processing will not return a measure value in case of a null result for the extraction. Examples -------- Assuming we wanted to compute the maximum value of a raw data set we can create the following step >>> from dispel.data.values import ValueDefinition >>> from dispel.processing.extract import ExtractStep >>> step = ExtractStep( ... 'data-set-id', ... lambda data: data.max(axis=0), ... ValueDefinition('maximum','Maximum value') ... ) A common approach is to define a processing step for re-use and leveraging the ``@transformation`` decorator to specify the transformation function: >>> import pandas as pd >>> from dispel.data.values import ValueDefinition >>> from dispel.processing.extract import ExtractStep >>> from dispel.processing.data_set import transformation >>> class MyExtractStep(ExtractStep): ... data_set_ids = 'data-set-id' ... 
definition = ValueDefinition('maximum','Maximum value') ... ... @transformation ... def _max(self, data: pd.DataFrame) -> float: ... return data.max(axis=0) Often one wants to extract multiple measures from one data set. This can be achieved by using prototypes and optional named arguments with ``@transformation``: >>> import pandas as pd >>> from dispel.data.values import ValueDefinitionPrototype >>> from dispel.processing.extract import ExtractStep >>> from dispel.processing.data_set import transformation >>> class MyExtractStep(ExtractStep): ... data_set_ids = 'data-set-id' ... definition = ValueDefinitionPrototype( ... id_='id-{agg_abbr}', ... name='{agg} value' ... ) ... ... @transformation(agg='Maximum', agg_abbr='max') ... def _max(self, data: pd.DataFrame) -> float: ... return data.max(axis=0) ... ... @transformation(agg='Minimum', agg_abbr='min') ... def _min(self, data: pd.DataFrame) -> float: ... return data.min(axis=0) """ yield_if_nan: bool = False def __init__( self, data_set_ids: Optional[Union[str, Iterable[str]]] = None, transform_function: Optional[Callable[..., Any]] = None, definition: Optional[Union[ValueDefinition, ValueDefinitionPrototype]] = None, level_filter: Optional[LevelFilterType] = None, yield_if_nan: Optional[bool] = None, ): super().__init__( definition=definition, data_set_ids=data_set_ids, transform_function=transform_function, level_filter=level_filter, ) self.yield_if_nan = yield_if_nan or self.yield_if_nan def wrap_result( self, res: Any, level: Level, reading: Reading, **kwargs: Any ) -> WrapResultGeneratorType: """Wrap the result from the processing function into a class. Parameters ---------- res Any result returned by the extraction step. If res is a :class:`~dispel.data.flags.WrappedResult`, the flag contained in the object will be automatically added to the :class:`~dispel.data.measures.MeasureValue`, hence the flagged wrapped results will always translate into flagged :class:`~dispel.data.measures.MeasureValue`. level The current level reading The current reading kwargs Additional kwargs Yields ------ LevelProcessingResult The processing result """ try: if len(res) == 0: res = math.nan warnings.warn("Extract step returned an iterable!", UserWarning) except TypeError: pass
if is_wrapped := isinstance(res, WrappedResult):
4
2023-11-14 10:06:46+00:00
24k
Jisencc/yolov5_dual_weighting
segment/val.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # ONNX Runtime: *.onnx\n # ONNX OpenCV DNN: *.onnx --dnn\n # OpenVINO: *_openvino_model\n # CoreML: *.mlmodel\n # TensorRT: *.engine\n # TensorFlow SavedModel: *_saved_model\n # TensorFlow GraphDef: *.pb\n # TensorFlow Lite: *.tflite\n # TensorFlow Edge TPU: *_edgetpu.tflite\n # PaddlePaddle: *_paddle_model\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)\n fp16 &= pt or jit or onnx or engine or triton # FP16\n nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)\n stride = 32 # default stride\n cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA\n if not (pt or triton):\n w = attempt_download(w) # download if not local\n\n if pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)\n stride = max(int(model.stride.max()), 32) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n model.half() if fp16 else model.float()\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files, map_location=device)\n model.half() if fp16 else model.float()\n if extra_files['config.txt']: # load metadata dict\n d = json.loads(extra_files['config.txt'],\n object_hook=lambda d: {\n int(k) if k.isdigit() else k: v\n for k, v in d.items()})\n stride, names = int(d['stride']), d['names']\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements('opencv-python>=4.5.4')\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n output_names = [x.name for x in session.get_outputs()]\n meta = session.get_modelmeta().custom_metadata_map # metadata\n if 'stride' in meta:\n stride, names = int(meta['stride']), eval(meta['names'])\n elif xml: # OpenVINO\n LOGGER.info(f'Loading {w} for OpenVINO inference...')\n check_requirements('openvino>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/\n from openvino.runtime import Core, Layout, get_batch\n core = Core()\n if not Path(w).is_file(): # if not *.xml\n w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir\n ov_model = core.read_model(model=w, weights=Path(w).with_suffix('.bin'))\n if ov_model.get_parameters()[0].get_layout().empty:\n ov_model.get_parameters()[0].set_layout(Layout('NCHW'))\n batch_dim = get_batch(ov_model)\n if batch_dim.is_static:\n batch_size = 
batch_dim.get_length()\n ov_compiled_model = core.compile_model(ov_model, device_name='AUTO') # AUTO selects best available device\n stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0\n if device.type == 'cpu':\n device = torch.device('cuda:0')\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n context = model.create_execution_context()\n bindings = OrderedDict()\n output_names = []\n fp16 = False # default updated below\n dynamic = False\n for i in range(model.num_bindings):\n name = model.get_binding_name(i)\n dtype = trt.nptype(model.get_binding_dtype(i))\n if model.binding_is_input(i):\n if -1 in tuple(model.get_binding_shape(i)): # dynamic\n dynamic = True\n context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))\n if dtype == np.float16:\n fp16 = True\n else: # output\n output_names.append(name)\n shape = tuple(context.get_binding_shape(i))\n im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)\n bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif saved_model: # TF SavedModel\n LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')\n import tensorflow as tf\n keras = False # assume TF1 saved_model\n model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)\n elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped\n ge = x.graph.as_graph_element\n return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))\n\n def gd_outputs(gd):\n name_list, input_list = [], []\n for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef\n name_list.append(node.name)\n input_list.extend(node.input)\n return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))\n\n gd = tf.Graph().as_graph_def() # TF GraphDef\n with open(w, 'rb') as f:\n gd.ParseFromString(f.read())\n frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd))\n elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu\n from tflite_runtime.interpreter import Interpreter, load_delegate\n except ImportError:\n import tensorflow as tf\n Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,\n if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n delegate = {\n 'Linux': 'libedgetpu.so.1',\n 'Darwin': 
'libedgetpu.1.dylib',\n 'Windows': 'edgetpu.dll'}[platform.system()]\n interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])\n else: # TFLite\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n interpreter = Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n # load metadata\n with contextlib.suppress(zipfile.BadZipFile):\n with zipfile.ZipFile(w, 'r') as model:\n meta_file = model.namelist()[0]\n meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))\n stride, names = int(meta['stride']), meta['names']\n elif tfjs: # TF.js\n raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')\n elif paddle: # PaddlePaddle\n LOGGER.info(f'Loading {w} for PaddlePaddle inference...')\n check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')\n import paddle.inference as pdi\n if not Path(w).is_file(): # if not *.pdmodel\n w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir\n weights = Path(w).with_suffix('.pdiparams')\n config = pdi.Config(str(w), str(weights))\n if cuda:\n config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)\n predictor = pdi.create_predictor(config)\n input_handle = predictor.get_input_handle(predictor.get_input_names()[0])\n output_names = predictor.get_output_names()\n elif triton: # NVIDIA Triton Inference Server\n LOGGER.info(f'Using {w} as Triton Inference Server...')\n check_requirements('tritonclient[all]')\n from utils.triton import TritonRemoteModel\n model = TritonRemoteModel(url=w)\n nhwc = model.runtime.startswith('tensorflow')\n else:\n raise NotImplementedError(f'ERROR: {w} is not a supported format')\n\n # class names\n if 'names' not in locals():\n names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}\n if names[0] == 'n01440764' and len(names) == 1000: # ImageNet\n names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names\n\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.fp16 and im.dtype != torch.float16:\n im = im.half() # to FP16\n if self.nhwc:\n im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)\n\n if self.pt: # PyTorch\n y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)\n elif self.jit: # TorchScript\n y = self.model(im)\n elif self.dnn: # ONNX OpenCV DNN\n im = im.cpu().numpy() # torch to numpy\n self.net.setInput(im)\n y = self.net.forward()\n elif self.onnx: # ONNX Runtime\n im = im.cpu().numpy() # torch to numpy\n y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})\n elif self.xml: # OpenVINO\n im = im.cpu().numpy() # FP32\n y = list(self.ov_compiled_model(im).values())\n elif self.engine: # TensorRT\n if self.dynamic and im.shape != self.bindings['images'].shape:\n i = self.model.get_binding_index('images')\n self.context.set_binding_shape(i, im.shape) # reshape if dynamic\n self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)\n for name in self.output_names:\n i = self.model.get_binding_index(name)\n self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))\n s = self.bindings['images'].shape\n assert im.shape 
== s, f\"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}\"\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = [self.bindings[x].data for x in sorted(self.output_names)]\n elif self.coreml: # CoreML\n im = im.cpu().numpy()\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.BILINEAR)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n if 'confidence' in y:\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n else:\n y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)\n elif self.paddle: # PaddlePaddle\n im = im.cpu().numpy().astype(np.float32)\n self.input_handle.copy_from_cpu(im)\n self.predictor.run()\n y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]\n elif self.triton: # NVIDIA Triton Inference Server\n y = self.model(im)\n else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n im = im.cpu().numpy()\n if self.saved_model: # SavedModel\n y = self.model(im, training=False) if self.keras else self.model(im)\n elif self.pb: # GraphDef\n y = self.frozen_func(x=self.tf.constant(im))\n else: # Lite or Edge TPU\n input = self.input_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = []\n for output in self.output_details:\n x = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n x = (x.astype(np.float32) - zero_point) * scale # re-scale\n y.append(x)\n y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]\n y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels\n\n if isinstance(y, (list, tuple)):\n return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]\n else:\n return self.from_numpy(y)\n\n def from_numpy(self, x):\n return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x\n\n def warmup(self, imgsz=(1, 3, 640, 640)):\n # Warmup model by running inference once\n warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton\n if any(warmup_types) and (self.device.type != 'cpu' or self.triton):\n im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input\n for _ in range(2 if self.jit else 1): #\n self.forward(im) # warmup\n\n @staticmethod\n def _model_type(p='path/to/model.pt'):\n # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx\n # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]\n from export import export_formats\n from utils.downloads import is_url\n sf = list(export_formats().Suffix) # export suffixes\n if not is_url(p, check=False):\n check_suffix(p, sf) # checks\n url = urlparse(p) # if url may be Triton inference server\n types = [s in Path(p).name for s in sf]\n types[8] &= not types[9] # tflite &= not edgetpu\n triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])\n return types + [triton]\n\n @staticmethod\n def _load_metadata(f=Path('path/to/meta.yaml')):\n # Load metadata from meta.yaml if it exists\n if f.exists():\n d = yaml_load(f)\n return d['stride'], d['names'] # assign stride, names\n return None, None" }, { "identifier": "SegmentationModel", "path": "models/yolo.py", "snippet": "class SegmentationModel(DetectionModel):\n # YOLOv5 segmentation model\n def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):\n super().__init__(cfg, ch, nc, anchors)" }, { "identifier": "Callbacks", "path": "utils/callbacks.py", "snippet": "class Callbacks:\n \"\"\"\"\n Handles all registered callbacks for YOLOv5 Hooks\n \"\"\"\n\n def __init__(self):\n # Define the available callbacks\n self._callbacks = {\n 'on_pretrain_routine_start': [],\n 'on_pretrain_routine_end': [],\n 'on_train_start': [],\n 'on_train_epoch_start': [],\n 'on_train_batch_start': [],\n 'optimizer_step': [],\n 'on_before_zero_grad': [],\n 'on_train_batch_end': [],\n 'on_train_epoch_end': [],\n 'on_val_start': [],\n 'on_val_batch_start': [],\n 'on_val_image_end': [],\n 'on_val_batch_end': [],\n 'on_val_end': [],\n 'on_fit_epoch_end': [], # fit = train + val\n 'on_model_save': [],\n 'on_train_end': [],\n 'on_params_update': [],\n 'teardown': [], }\n self.stop_training = False # set True to interrupt training\n\n def register_action(self, hook, name='', callback=None):\n \"\"\"\n Register a new action to a callback hook\n\n Args:\n hook: The callback hook name to register the action to\n name: The name of the action for later reference\n callback: The callback to fire\n \"\"\"\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n assert callable(callback), f\"callback '{callback}' is not callable\"\n self._callbacks[hook].append({'name': name, 'callback': callback})\n\n def get_registered_actions(self, hook=None):\n \"\"\"\"\n Returns all the registered actions by callback hook\n\n Args:\n hook: The name of the hook to check, defaults to all\n \"\"\"\n return self._callbacks[hook] if hook else self._callbacks\n\n def run(self, hook, *args, thread=False, **kwargs):\n \"\"\"\n Loop through the registered actions and fire all callbacks on main thread\n\n Args:\n hook: The name of the hook to check, defaults to all\n args: Arguments to receive from YOLOv5\n thread: (boolean) Run callbacks in daemon thread\n kwargs: Keyword Arguments to receive from YOLOv5\n \"\"\"\n\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n for logger in self._callbacks[hook]:\n if thread:\n threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()\n else:\n logger['callback'](*args, **kwargs)" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "FILE = Path(__file__).resolve()\nROOT = FILE.parents[1] # YOLOv5 root directory\nRANK = int(os.getenv('RANK', -1))\nNUM_THREADS = min(8, max(1, os.cpu_count() - 
1)) # number of YOLOv5 multiprocessing threads\nDATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory\nAUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode\nVERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode\nTQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format\nFONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf\nLOGGING_NAME = 'yolov5'\nLOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)\nCONFIG_DIR = user_config_dir() # Ultralytics settings dir\ndef is_ascii(s=''):\ndef is_chinese(s='人工智能'):\ndef is_colab():\ndef is_jupyter():\ndef is_kaggle():\ndef is_docker() -> bool:\ndef is_writeable(dir, test=False):\ndef set_logging(name=LOGGING_NAME, verbose=True):\ndef user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):\n def __init__(self, t=0.0):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\n def time(self):\n def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):\n def _timeout_handler(self, signum, frame):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def __init__(self, new_dir):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\ndef methods(instance):\ndef print_args(args: Optional[dict] = None, show_file=True, show_func=False):\ndef init_seeds(seed=0, deterministic=False):\ndef intersect_dicts(da, db, exclude=()):\ndef get_default_args(func):\ndef get_latest_run(search_dir='.'):\ndef file_age(path=__file__):\ndef file_date(path=__file__):\ndef file_size(path):\ndef check_online():\n def run_once():\ndef git_describe(path=ROOT): # path must be a directory\ndef check_git_status(repo='ultralytics/yolov5', branch='master'):\ndef check_git_info(path='.'):\ndef check_python(minimum='3.8.0'):\ndef check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\ndef check_img_size(imgsz, s=32, floor=0):\ndef check_imshow(warn=False):\ndef check_suffix(file='yolov5s.pt', suffix=('.pt', ), msg=''):\ndef check_yaml(file, suffix=('.yaml', '.yml')):\ndef check_file(file, suffix=''):\ndef check_font(font=FONT, progress=False):\ndef check_dataset(data, autodownload=True):\ndef check_amp(model):\n def amp_allclose(model, im):\ndef yaml_load(file='data.yaml'):\ndef yaml_save(file='data.yaml', data={}):\ndef unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):\ndef url2file(url):\ndef download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):\n def download_one(url, dir):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):\ndef scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):\ndef clip_boxes(boxes, shape):\ndef clip_segments(segments, shape):\ndef 
non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nm=0, # number of masks\n):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):\ndef apply_classifier(x, model, img, im0):\ndef increment_path(path, exist_ok=False, sep='', mkdir=False):\ndef imread(filename, flags=cv2.IMREAD_COLOR):\ndef imwrite(filename, img):\ndef imshow(path, im):\nclass Profile(contextlib.ContextDecorator):\nclass Timeout(contextlib.ContextDecorator):\nclass WorkingDirectory(contextlib.ContextDecorator):" }, { "identifier": "ConfusionMatrix", "path": "utils/metrics.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc, conf=0.25, iou_thres=0.45):\n self.matrix = np.zeros((nc + 1, nc + 1))\n self.nc = nc # number of classes\n self.conf = conf\n self.iou_thres = iou_thres\n\n def process_batch(self, detections, labels):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n labels (Array[M, 5]), class, x1, y1, x2, y2\n Returns:\n None, updates confusion matrix accordingly\n \"\"\"\n if detections is None:\n gt_classes = labels.int()\n for gc in gt_classes:\n self.matrix[self.nc, gc] += 1 # background FN\n return\n\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(int)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[detection_classes[m1[j]], gc] += 1 # correct\n else:\n self.matrix[self.nc, gc] += 1 # true background\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[dc, self.nc] += 1 # predicted background\n\n def tp_fp(self):\n tp = self.matrix.diagonal() # true positives\n fp = self.matrix.sum(1) - tp # false positives\n # fn = self.matrix.sum(0) - tp # false negatives (missed detections)\n return tp[:-1], fp[:-1] # remove background class\n\n @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure')\n def plot(self, normalize=True, save_dir='', names=()):\n import seaborn as sn\n\n array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns\n array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)\n nc, nn = self.nc, len(names) # number of classes, names\n sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size\n labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels\n ticklabels = (names + ['background']) if labels else 'auto'\n with warnings.catch_warnings():\n 
warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered\n sn.heatmap(array,\n ax=ax,\n annot=nc < 30,\n annot_kws={\n 'size': 8},\n cmap='Blues',\n fmt='.2f',\n square=True,\n vmin=0.0,\n xticklabels=ticklabels,\n yticklabels=ticklabels).set_facecolor((1, 1, 1))\n ax.set_xlabel('True')\n ax.set_ylabel('Predicted')\n ax.set_title('Confusion Matrix')\n fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n plt.close(fig)\n\n def print(self):\n for i in range(self.nc + 1):\n print(' '.join(map(str, self.matrix[i])))" }, { "identifier": "box_iou", "path": "utils/metrics.py", "snippet": "def box_iou(box1, box2, eps=1e-7):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)\n inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)\n\n # IoU = inter / (area1 + area2 - inter)\n return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)" }, { "identifier": "output_to_target", "path": "utils/plots.py", "snippet": "def output_to_target(output, max_det=300):\n # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting\n targets = []\n for i, o in enumerate(output):\n box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1)\n j = torch.full((conf.shape[0], 1), i)\n targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1))\n return torch.cat(targets, 0).numpy()" }, { "identifier": "plot_val_study", "path": "utils/plots.py", "snippet": "def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\n # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)\n save_dir = Path(file).parent if file else Path(dir)\n plot2 = False # plot additional results\n if plot2:\n ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()\n\n fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)\n # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:\n for f in sorted(save_dir.glob('study*.txt')):\n y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T\n x = np.arange(y.shape[1]) if x is None else np.array(x)\n if plot2:\n s = ['P', 'R', '[email protected]', '[email protected]:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']\n for i in range(7):\n ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)\n ax[i].set_title(s[i])\n\n j = y[3].argmax() + 1\n ax2.plot(y[5, 1:j],\n y[3, 1:j] * 1E2,\n '.-',\n linewidth=2,\n markersize=8,\n label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))\n\n ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],\n 'k.-',\n linewidth=2,\n markersize=8,\n alpha=.25,\n label='EfficientDet')\n\n ax2.grid(alpha=0.2)\n ax2.set_yticks(np.arange(20, 60, 5))\n ax2.set_xlim(0, 57)\n ax2.set_ylim(25, 55)\n ax2.set_xlabel('GPU Speed (ms/img)')\n ax2.set_ylabel('COCO AP val')\n ax2.legend(loc='lower right')\n f = save_dir / 'study.png'\n print(f'Saving {f}...')\n plt.savefig(f, dpi=300)" }, { 
"identifier": "create_dataloader", "path": "utils/segment/dataloaders.py", "snippet": "def create_dataloader(path,\n imgsz,\n batch_size,\n stride,\n single_cls=False,\n hyp=None,\n augment=False,\n cache=False,\n pad=0.0,\n rect=False,\n rank=-1,\n workers=8,\n image_weights=False,\n quad=False,\n prefix='',\n shuffle=False,\n mask_downsample_ratio=1,\n overlap_mask=False,\n seed=0):\n if rect and shuffle:\n LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')\n shuffle = False\n with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP\n dataset = LoadImagesAndLabelsAndMasks(\n path,\n imgsz,\n batch_size,\n augment=augment, # augmentation\n hyp=hyp, # hyperparameters\n rect=rect, # rectangular batches\n cache_images=cache,\n single_cls=single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix,\n downsample_ratio=mask_downsample_ratio,\n overlap=overlap_mask)\n\n batch_size = min(batch_size, len(dataset))\n nd = torch.cuda.device_count() # number of CUDA devices\n nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)\n loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates\n generator = torch.Generator()\n generator.manual_seed(6148914691236517205 + seed + RANK)\n return loader(\n dataset,\n batch_size=batch_size,\n shuffle=shuffle and sampler is None,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn,\n worker_init_fn=seed_worker,\n generator=generator,\n ), dataset" }, { "identifier": "mask_iou", "path": "utils/segment/general.py", "snippet": "def mask_iou(mask1, mask2, eps=1e-7):\n \"\"\"\n mask1: [N, n] m1 means number of predicted objects\n mask2: [M, n] m2 means number of gt objects\n Note: n means image_w x image_h\n\n return: masks iou, [N, M]\n \"\"\"\n intersection = torch.matmul(mask1, mask2.t()).clamp(0)\n union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection\n return intersection / (union + eps)" }, { "identifier": "process_mask", "path": "utils/segment/general.py", "snippet": "def process_mask(protos, masks_in, bboxes, shape, upsample=False):\n \"\"\"\n Crop before upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n ih, iw = shape\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW\n\n downsampled_bboxes = bboxes.clone()\n downsampled_bboxes[:, 0] *= mw / iw\n downsampled_bboxes[:, 2] *= mw / iw\n downsampled_bboxes[:, 3] *= mh / ih\n downsampled_bboxes[:, 1] *= mh / ih\n\n masks = crop_mask(masks, downsampled_bboxes) # CHW\n if upsample:\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n return masks.gt_(0.5)" }, { "identifier": "process_mask_native", "path": "utils/segment/general.py", "snippet": "def process_mask_native(protos, masks_in, bboxes, shape):\n \"\"\"\n Crop after upsample.\n protos: [mask_dim, mask_h, mask_w]\n masks_in: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape: 
input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n c, mh, mw = protos.shape # CHW\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)\n gain = min(mh / shape[0], mw / shape[1]) # gain = old / new\n pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(mh - pad[1]), int(mw - pad[0])\n masks = masks[:, top:bottom, left:right]\n\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n masks = crop_mask(masks, bboxes) # CHW\n return masks.gt_(0.5)" }, { "identifier": "scale_image", "path": "utils/segment/general.py", "snippet": "def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):\n \"\"\"\n img1_shape: model input shape, [h, w]\n img0_shape: origin pic shape, [h, w, 3]\n masks: [h, w, num]\n \"\"\"\n # Rescale coordinates (xyxy) from im1_shape to im0_shape\n if ratio_pad is None: # calculate from im0_shape\n gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new\n pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding\n else:\n pad = ratio_pad[1]\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])\n\n if len(masks.shape) < 2:\n raise ValueError(f'\"len of masks shape\" should be 2 or 3, but got {len(masks.shape)}')\n masks = masks[top:bottom, left:right]\n # masks = masks.permute(2, 0, 1).contiguous()\n # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]\n # masks = masks.permute(1, 2, 0).contiguous()\n masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))\n\n if len(masks.shape) == 2:\n masks = masks[:, :, None]\n return masks" }, { "identifier": "Metrics", "path": "utils/segment/metrics.py", "snippet": "class Metrics:\n \"\"\"Metric for boxes and masks.\"\"\"\n\n def __init__(self) -> None:\n self.metric_box = Metric()\n self.metric_mask = Metric()\n\n def update(self, results):\n \"\"\"\n Args:\n results: Dict{'boxes': Dict{}, 'masks': Dict{}}\n \"\"\"\n self.metric_box.update(list(results['boxes'].values()))\n self.metric_mask.update(list(results['masks'].values()))\n\n def mean_results(self):\n return self.metric_box.mean_results() + self.metric_mask.mean_results()\n\n def class_result(self, i):\n return self.metric_box.class_result(i) + self.metric_mask.class_result(i)\n\n def get_maps(self, nc):\n return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)\n\n @property\n def ap_class_index(self):\n # boxes and masks have the same ap_class_index\n return self.metric_box.ap_class_index" }, { "identifier": "ap_per_class_box_and_mask", "path": "utils/segment/metrics.py", "snippet": "def ap_per_class_box_and_mask(\n tp_m,\n tp_b,\n conf,\n pred_cls,\n target_cls,\n plot=False,\n save_dir='.',\n names=(),\n):\n \"\"\"\n Args:\n tp_b: tp of boxes.\n tp_m: tp of masks.\n other arguments see `func: ap_per_class`.\n \"\"\"\n results_boxes = ap_per_class(tp_b,\n conf,\n pred_cls,\n target_cls,\n plot=plot,\n save_dir=save_dir,\n names=names,\n prefix='Box')[2:]\n results_masks = ap_per_class(tp_m,\n conf,\n pred_cls,\n target_cls,\n plot=plot,\n save_dir=save_dir,\n names=names,\n prefix='Mask')[2:]\n\n results = {\n 'boxes': {\n 'p': results_boxes[0],\n 'r': results_boxes[1],\n 'ap': results_boxes[3],\n 'f1': results_boxes[2],\n 'ap_class': results_boxes[4]},\n 'masks': {\n 'p': results_masks[0],\n 'r': results_masks[1],\n 'ap': 
results_masks[3],\n 'f1': results_masks[2],\n 'ap_class': results_masks[4]}}\n return results" }, { "identifier": "plot_images_and_masks", "path": "utils/segment/plots.py", "snippet": "@threaded\ndef plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None):\n # Plot image grid with labels\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n if isinstance(masks, torch.Tensor):\n masks = masks.cpu().numpy().astype(int)\n\n max_size = 1920 # max image size\n max_subplots = 16 # max image subplots, i.e. 4x4\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n if np.max(images[0]) <= 1:\n images *= 255 # de-normalise (optional)\n\n # Build Image\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, im in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n im = im.transpose(1, 2, 0)\n mosaic[y:y + h, x:x + w, :] = im\n\n # Resize (optional)\n scale = max_size / ns / max(h, w)\n if scale < 1:\n h = math.ceil(scale * h)\n w = math.ceil(scale * w)\n mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))\n\n # Annotate\n fs = int((h + w) * ns * 0.01) # font size\n annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)\n for i in range(i + 1):\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders\n if paths:\n annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames\n if len(targets) > 0:\n idx = targets[:, 0] == i\n ti = targets[idx] # image targets\n\n boxes = xywh2xyxy(ti[:, 2:6]).T\n classes = ti[:, 1].astype('int')\n labels = ti.shape[1] == 6 # labels if no conf column\n conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale < 1: # absolute coords need scale if image scales\n boxes *= scale\n boxes[[0, 2]] += x\n boxes[[1, 3]] += y\n for j, box in enumerate(boxes.T.tolist()):\n cls = classes[j]\n color = colors(cls)\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'\n annotator.box_label(box, label, color=color)\n\n # Plot masks\n if len(masks):\n if masks.max() > 1.0: # mean that masks are overlap\n image_masks = masks[[i]] # (1, 640, 640)\n nl = len(ti)\n index = np.arange(nl).reshape(nl, 1, 1) + 1\n image_masks = np.repeat(image_masks, nl, axis=0)\n image_masks = np.where(image_masks == index, 1.0, 0.0)\n else:\n image_masks = masks[idx]\n\n im = np.asarray(annotator.im).copy()\n for j, box in enumerate(boxes.T.tolist()):\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n color = colors(classes[j])\n mh, mw = image_masks[j].shape\n if mh != h or mw != w:\n mask = image_masks[j].astype(np.uint8)\n mask = cv2.resize(mask, (w, h))\n mask = mask.astype(bool)\n else:\n mask = image_masks[j].astype(bool)\n with contextlib.suppress(Exception):\n im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6\n 
annotator.fromarray(im)\n annotator.im.save(fname) # save" }, { "identifier": "de_parallel", "path": "utils/torch_utils.py", "snippet": "def de_parallel(model):\n # De-parallelize a model: returns single-GPU model if model is of type DP or DDP\n return model.module if is_parallel(model) else model" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "smart_inference_mode", "path": "utils/torch_utils.py", "snippet": "def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):\n # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator\n def decorate(fn):\n return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)\n\n return decorate" } ]
import argparse
import json
import os
import subprocess
import sys
import numpy as np
import torch
import torch.nn.functional as F
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tqdm import tqdm
from models.common import DetectMultiBackend
from models.yolo import SegmentationModel
from utils.callbacks import Callbacks
from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
                           non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, box_iou
from utils.plots import output_to_target, plot_val_study
from utils.segment.dataloaders import create_dataloader
from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image
from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
from utils.segment.plots import plot_images_and_masks
from utils.torch_utils import de_parallel, select_device, smart_inference_mode
from pycocotools.mask import encode
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
16,628
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val-seg',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        overlap=False,
        mask_downsample_ratio=1,
        compute_loss=None,
        callbacks=Callbacks(),
):
    if save_json:
        check_requirements('pycocotools>=2.0.6')
        process = process_mask_native  # more accurate
    else:
        process = process_mask  # faster

    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
        nm = de_parallel(model).model[-1].nm  # number of masks
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32  # number of masks
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, workers=workers, prefix=colorstr(f'{task}: '), overlap_mask=overlap, mask_downsample_ratio=mask_downsample_ratio)[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R', 'mAP50', 'mAP50-95)')
    dt = Profile(), Profile(), Profile()
    metrics = Metrics()
    loss = torch.zeros(4, device=device)
    jdict, stats = [], []
    # callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
        # callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
                masks = masks.to(device)
            masks = masks.float()
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)

        # Loss
        if compute_loss:
            loss += compute_loss((train_out, protos), targets, masks)[1]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Validate a trained YOLOv5 segment model on a segment dataset

Usage:
    $ bash data/scripts/get_coco.sh --val --segments  # download COCO-segments val split (1G, 5000 images)
    $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate COCO-segments

Usage - formats:
    $ python segment/val.py --weights yolov5s-seg.pt                 # PyTorch
                                      yolov5s-seg.torchscript        # TorchScript
                                      yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                      yolov5s-seg_openvino_label     # OpenVINO
                                      yolov5s-seg.engine             # TensorRT
                                      yolov5s-seg.mlmodel            # CoreML (macOS-only)
                                      yolov5s-seg_saved_model        # TensorFlow SavedModel
                                      yolov5s-seg.pb                 # TensorFlow GraphDef
                                      yolov5s-seg.tflite             # TensorFlow Lite
                                      yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
                                      yolov5s-seg_paddle_model       # PaddlePaddle
"""

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative


def save_one_txt(predn, save_conf, shape, file):
    # Save one txt result
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    for *xyxy, conf, cls in predn.tolist():
        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
        with open(file, 'a') as f:
            f.write(('%g ' * len(line)).rstrip() % line + '\n')


def save_one_json(predn, jdict, path, class_map, pred_masks):
    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    def single_encode(x):
        rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0]
        rle['counts'] = rle['counts'].decode('utf-8')
        return rle

    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
    box = xyxy2xywh(predn[:, :4])  # xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
    pred_masks = np.transpose(pred_masks, (2, 0, 1))
    with ThreadPool(NUM_THREADS) as pool:
        rles = pool.map(single_encode, pred_masks)
    for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
        jdict.append({
            'image_id': image_id,
            'category_id': class_map[int(p[5])],
            'bbox': [round(x, 3) for x in b],
            'score': round(p[4], 5),
            'segmentation': rles[i]})


def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):
    """
    Return correct prediction matrix
    Arguments:
        detections (array[N, 6]), x1, y1, x2, y2, conf, class
        labels (array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (array[N, 10]), for 10 IoU levels
    """
    if masks:
        if overlap:
            nl = len(labels)
            index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
            gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)
            gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
        if gt_masks.shape[1:] != pred_masks.shape[1:]:
            gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0]
            gt_masks = gt_masks.gt_(0.5)
        iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
    else:  # boxes
        iou = box_iou(labels[:, 1:], detections[:, :4])

    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    correct_class = labels[:, 0:1] == detections[:, 5]
    for i in range(len(iouv)):
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                # matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
            correct[matches[:, 1].astype(int), i] = True
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)


@smart_inference_mode()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.6,  # NMS IoU threshold
        max_det=300,  # maximum detections per image
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val-seg',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        overlap=False,
        mask_downsample_ratio=1,
        compute_loss=None,
        callbacks=Callbacks(),
):
    if save_json:
        check_requirements('pycocotools>=2.0.6')
        process = process_mask_native  # more accurate
    else:
        process = process_mask  # faster

    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
        nm = de_parallel(model).model[-1].nm  # number of masks
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32  # number of masks
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, workers=workers, prefix=colorstr(f'{task}: '), overlap_mask=overlap, mask_downsample_ratio=mask_downsample_ratio)[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R', 'mAP50', 'mAP50-95)')
    dt = Profile(), Profile(), Profile()
    metrics = Metrics()
    loss = torch.zeros(4, device=device)
    jdict, stats = [], []
    # callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
        # callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
                masks = masks.to(device)
            masks = masks.float()
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)

        # Loss
        if compute_loss:
            loss += compute_loss((train_out, protos), targets, masks)[1]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
preds = non_max_suppression(preds,
3
2023-11-12 13:28:26+00:00
24k
cyberark/ark-sdk-python
ark_sdk_python/cli_services/dpa/db/ark_dpa_db_policies_editor_service.py
[ { "identifier": "ArkInquirerRender", "path": "ark_sdk_python/args/ark_args_formatter.py", "snippet": "class ArkInquirerRender(ConsoleRender):\n # pylint: disable=keyword-arg-before-vararg,protected-access\n def __init__(self, event_generator=None, *args, **kwargs):\n super().__init__(event_generator=event_generator, theme=ARK_INQUIRER_THEME, *args, **kwargs)\n\n def render(self, question, answers=None):\n question.answers = answers or {}\n\n if question.ignore:\n return question.default\n\n clazz = self.render_factory(question.kind)\n render = clazz(question, terminal=self.terminal, theme=self._theme, show_default=question.show_default)\n if isinstance(\n render, (inquirer.render.console._text.Text, inquirer.render.console._password.Password, inquirer.render.console._path.Path)\n ):\n render.current = ''\n self.clear_eos()\n\n try:\n a = self._event_loop(render)\n if not a and question.default:\n a = question.default\n elif not a and question.name in answers:\n a = answers[question.name]\n return a\n finally:\n print('')\n\n def _print_header(self, render):\n base = render.get_header()\n\n header = base[: self.width - 9] + '...' if len(base) > self.width - 6 else base\n default_value = '{normal} ({default})'.format(default=render.question.default, normal=self.terminal.normal)\n show_default = render.question.default and render.show_default\n header += default_value if show_default else ''\n msg_template = '{t.move_up}{t.clear_eol}{tq.brackets_color}{tq.mark_color}?{tq.brackets_color} {msg}{t.normal}'\n\n escaped_current_value = str(render.get_current_value()).replace('{', '{{').replace('}', '}}')\n self.print_str(\n f'\\n{msg_template} {escaped_current_value}',\n msg=header,\n lf=not render.title_inline,\n tq=self._theme.Question,\n )" }, { "identifier": "ArkISPAuth", "path": "ark_sdk_python/auth/ark_isp_auth.py", "snippet": "class ArkISPAuth(ArkAuth):\n def __perform_identity_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret], force: bool\n ) -> ArkToken:\n try:\n method_settings = cast(IdentityArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentity(\n username=auth_profile.username,\n password=secret.secret.get_secret_value() if secret else None,\n identity_url=method_settings.identity_url,\n mfa_type=method_settings.identity_mfa_method,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n )\n identity.auth_identity(profile, ArkSystemConfig.is_interactive() and method_settings.identity_mfa_interactive, force)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n token_lifetime = identity.session_details.token_lifetime\n if not token_lifetime:\n token_lifetime = DEFAULT_TOKEN_LIFETIME\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.Identity,\n expires_in=datetime.now() + timedelta(seconds=token_lifetime),\n refresh_token=identity.session_details.refresh_token,\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n self._logger.exception(f'Failed to authenticate to identity security platform [{str(ex)}]')\n raise ArkAuthException from ex\n\n def __perform_identity_refresh_authentication(self, profile: ArkProfile, auth_profile: 
ArkAuthProfile, token: ArkToken) -> ArkToken:\n try:\n method_settings = cast(IdentityArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentity(\n username=auth_profile.username,\n password=None,\n identity_url=method_settings.identity_url,\n mfa_type=method_settings.identity_mfa_method,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n load_cache=True,\n cache_profile=profile,\n )\n identity.refresh_auth_identity(profile, method_settings.identity_mfa_interactive, False)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n token_lifetime = identity.session_details.token_lifetime\n if not token_lifetime:\n token_lifetime = DEFAULT_TOKEN_LIFETIME\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.Identity,\n expires_in=datetime.now() + timedelta(seconds=token_lifetime),\n refresh_token=identity.session_details.refresh_token,\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n raise ArkAuthException('Failed to authenticate to isp via identity') from ex\n\n def __perform_identity_service_user_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret], force: bool\n ) -> ArkToken:\n try:\n if not secret:\n raise ArkException('Token secret is required for identity service user auth')\n method_settings = cast(IdentityServiceUserArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentityServiceUser(\n username=auth_profile.username,\n token=secret.secret.get_secret_value(),\n app_name=method_settings.identity_authorization_application,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n )\n identity.auth_identity(profile, force)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.IdentityServiceUser,\n expires_in=datetime.now() + timedelta(hours=4),\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n self._logger.exception(f'Failed to authenticate to identity security platform with service user [{str(ex)}]')\n raise ArkAuthException from ex\n\n @overrides\n def _perform_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret] = None, force: bool = False\n ) -> ArkToken:\n \"\"\"\n Performs authentication to the identity security platform identity tenant\n Authentication can be done with either a service user or a normal user\n Authentication Methods:\n - Identity, Default\n - IdentityServiceUser\n\n Args:\n profile (ArkProfile): _description_\n auth_profile (ArkAuthProfile): _description_\n secret (Optional[ArkSecret], optional): _description_. Defaults to None.\n force (bool, optional): _description_. 
Defaults to False.\n\n Raises:\n ArkAuthException: _description_\n\n Returns:\n ArkToken: _description_\n \"\"\"\n self._logger.info('Performing authentication to ISP')\n if auth_profile.auth_method in [ArkAuthMethod.Identity, ArkAuthMethod.Default]:\n return self.__perform_identity_authentication(profile, auth_profile, secret, force)\n if auth_profile.auth_method == ArkAuthMethod.IdentityServiceUser:\n return self.__perform_identity_service_user_authentication(profile, auth_profile, secret, force)\n raise ArkAuthException('Given auth method is not supported')\n\n @overrides\n def _perform_refresh_authentication(self, profile: ArkProfile, auth_profile: ArkAuthProfile, token: ArkToken) -> ArkToken:\n \"\"\"\n Refresh for isp tenant is supported only for identity\n\n Args:\n profile (ArkProfile): _description_\n auth_profile (ArkAuthProfile): _description_\n token (ArkToken): _description_\n\n Returns:\n ArkToken: _description_\n \"\"\"\n self._logger.info('Performing refresh authentication to ISP')\n if auth_profile.auth_method in [ArkAuthMethod.Identity, ArkAuthMethod.Default]:\n return self.__perform_identity_refresh_authentication(profile, auth_profile, token)\n return token\n\n @staticmethod\n @overrides\n def authenticator_name() -> str:\n return AUTH_NAME\n\n @staticmethod\n @overrides\n def authenticator_human_readable_name() -> str:\n return AUTH_HUMAN_READABLE_NAME\n\n @staticmethod\n @overrides\n def supported_auth_methods() -> List[ArkAuthMethod]:\n return AUTH_METHODS\n\n @staticmethod\n @overrides\n def default_auth_method() -> Tuple[ArkAuthMethod, ArkAuthMethodSettings]:\n return DEFAULT_AUTH_METHOD, DEFAULT_AUTH_METHOD_SETTINGS" }, { "identifier": "ArkDPABasePoliciesEditorService", "path": "ark_sdk_python/cli_services/dpa/common/ark_dpa_base_policies_editor_service.py", "snippet": "class ArkDPABasePoliciesEditorService(\n ArkService, ABC, Generic[PolicyType, PolicyListItemType, AddPolicyType, UpdatePolicyType, GeneratePolicyType]\n):\n def __init__(\n self,\n policy_type: PolicyType,\n add_policy_type: AddPolicyType,\n update_policy_type: UpdatePolicyType,\n isp_auth: ArkISPAuth,\n policies_family: str,\n tenant_id: str,\n policies_cache_dir: Optional[str] = None,\n profile: Optional[ArkProfile] = None,\n ) -> None:\n super().__init__(isp_auth)\n profile = profile or ArkProfileLoader.load_default_profile()\n self._policies_family = policies_family\n self.__policies_cache_dir = Path(policies_cache_dir or Path.home() / '.ark_cache' / 'profiles' / profile.profile_name / tenant_id)\n if not policies_cache_dir and 'ARK_DPA_POLICIES_EDITOR_FOLDER' in os.environ:\n self.__policies_cache_dir = Path(os.environ['ARK_DPA_POLICIES_EDITOR_FOLDER'])\n self.__policies_cache_dir = self.__policies_cache_dir / policies_family\n self.__policies_cache_dir.mkdir(exist_ok=True, parents=True)\n self.__policy_type = policy_type\n self.__add_policy_type = add_policy_type\n self.__update_policy_type = update_policy_type\n\n @abstractmethod\n def _policy(self, get_policy: ArkDPAGetPolicy) -> PolicyType:\n pass\n\n @abstractmethod\n def _list_policies(self) -> List[PolicyListItemType]:\n pass\n\n @abstractmethod\n def _add_policy(self, add_policy: AddPolicyType) -> PolicyType:\n pass\n\n @abstractmethod\n def _update_policy(self, update_policy: UpdatePolicyType) -> PolicyType:\n pass\n\n @abstractmethod\n def _delete_policy(self, delete_policy: ArkDPADeletePolicy) -> None:\n pass\n\n @abstractmethod\n def _generate_policy(self, generate_policy: GeneratePolicyType, workspace_policies: List[PolicyType]) 
-> PolicyType:\n pass\n\n def __load_policy_diff(self, workspace_policy: PolicyType) -> Optional[Tuple[PolicyType, PolicyType]]:\n remote_policy = self._policy(ArkDPAGetPolicy(policy_id=str(workspace_policy.policy_id)))\n if remote_policy != workspace_policy:\n return (workspace_policy, remote_policy)\n return None\n\n def __load_policies_diff(self) -> Dict[str, Tuple[PolicyType, PolicyType]]:\n workspace_policies = self.__load_existing_policies_from_workspace()\n with ThreadPoolExecutor() as executor:\n remote_policies = {\n p[0].policy_name: p for p in executor.map(self.__load_policy_diff, workspace_policies.values()) if p is not None\n }\n return remote_policies\n\n def __load_policies_from_workspace_by_suffix(self, suffix: str = '') -> Dict[str, PolicyType]:\n p = Path(self.__policies_cache_dir).glob(f'*.json{suffix}')\n policies_files = [x for x in p if x.is_file() and x.suffix == suffix or '.json']\n policies = {}\n for f in policies_files:\n policy = self.__policy_type.parse_file(f)\n policies[policy.policy_name] = policy\n return policies\n\n def __load_removed_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix('.removed')\n\n def __load_generated_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix('.generated')\n\n def __load_existing_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix()\n\n def __load_policy_to_workspace(self, policy: PolicyListItemType, override: bool) -> Optional[PolicyType]:\n policy_data = self._policy(ArkDPAGetPolicy(policy_id=policy.policy_id))\n policy_path = Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json')\n if policy_path.exists():\n existing_data = self.__policy_type.parse_raw(policy_path.read_text())\n if existing_data != policy_data:\n if not override:\n return policy_data\n if not policy_data.policy_id:\n policy_data.policy_id = policy.policy_id\n policy_path.write_text(policy_data.json(indent=4))\n (Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json.removed')).unlink(missing_ok=True)\n\n def load_policies(self, load_policies: ArkDPALoadPolicies) -> ArkDPALoadedPolicies:\n \"\"\"\n Loads all remote policies into the local workspace.\n The user is asked whether to overwrite existing policies that were edited either locally or remotely.\n When default overwrite is enabled, existing policies are overwritten without prompts.\n\n Args:\n load_policies (ArkDPALoadPolicies): _description_\n\n Returns:\n ArkDPALoadedPolicies: _description_\n \"\"\"\n policies = self._list_policies()\n policies_to_query: Dict[str, PolicyType] = []\n with ThreadPoolExecutor() as executor:\n policies_to_query = {\n p.policy_name: p\n for p in executor.map(lambda p: self.__load_policy_to_workspace(p, load_policies.override), policies)\n if p is not None\n }\n # Build the query editor to ask the user\n policies_to_override = []\n if policies_to_query:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'override',\n message=f'Conflicts detected, please choose if you wish to override local {self._policies_family} policies or leave them as is',\n choices=[p.policy_name for p in policies_to_query.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policies_to_override = answers['override']\n for policy_name in policies_to_override:\n policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')\n if policy_path.exists() and 
policy_name in policies_to_query:\n policy_path.write_text(policies_to_query[policy_name].json(indent=4))\n return ArkDPALoadedPolicies(\n loaded_path=str(self.__policies_cache_dir),\n overall_policies_count=len(policies),\n loaded_policies_count=len(policies) - len(policies_to_query),\n overriden_policies_count=len(policies_to_override),\n untouched_policies_count=len(policies_to_query) - len(policies_to_override),\n )\n\n def edit_policies(self, edit_policies: ArkDPAEditPolicies) -> None:\n \"\"\"\n Edits the set of specified policies one at a time, either via the CLI or the default OS editor.\n Edited policies are only saved locally until they are committed.\n\n Args:\n edit_policies (ArkDPAEditPolicies): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n if not workspace_policies:\n raise ArkServiceException(\n f'No {self._policies_family} policies to edit in the workspace, please load the policies or generate a new one'\n )\n policy_names = edit_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to edit?, press space to select',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n try:\n answers = inquirer.prompt(\n [\n inquirer.Editor(f'{name}_edit', message=f'Chosen {self._policies_family} policy [{name}] is about to be edited')\n for name in policy_names\n ],\n render=ArkInquirerRender(),\n answers={f'{name}_edit': workspace_policies[name].json(indent=4) for name in policy_names},\n )\n for name in policy_names:\n policy = self.__policy_type.parse_raw(answers[f'{name}_edit'])\n for path in [\n Path(self.__policies_cache_dir) / (name + '.json'),\n Path(self.__policies_cache_dir) / (name + '.json.generated'),\n ]:\n if path.exists():\n path.write_text(policy.json(indent=4))\n break\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to edit {self._policies_family} policies, '\n f'you can edit the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def remove_policies(self, remove_policies: ArkDPARemovePolicies) -> None:\n \"\"\"\n Removes one or more policies from the local workspace.\n Until changes are committed, removing a remote policy only appends the `.deleted` indication to its name.\n After committing the changes, the policies are deleted both locally and remotely.\n New, uncommitted policies are deleted locally after the user consents.\n\n Args:\n remove_policies (ArkDPARemovePolicies): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n if not workspace_policies:\n raise ArkServiceException(\n f'No {self._policies_family} policies to remove in the workspace, please load the policies or generate a new one'\n )\n policy_names = remove_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to remove?, press space to select',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = 
answers['names']\n for policy_name in policy_names:\n for path in [\n Path(self.__policies_cache_dir) / (policy_name + '.json'),\n Path(self.__policies_cache_dir) / (policy_name + '.json.generated'),\n ]:\n if path.exists():\n if path.suffix == '.json':\n path.rename(Path(self.__policies_cache_dir) / (policy_name + '.json.removed'))\n else:\n answers = inquirer.prompt(\n [\n inquirer.Confirm(\n 'remove',\n message=f'Are you sure you want to remove local {self._policies_family} policy [{policy_name}]?, removing an uncommitted local policy cannot be reverted',\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n if answers['remove']:\n path.unlink(missing_ok=True)\n\n def view_policies(self, view_policies: ArkDPAViewPolicies) -> None:\n \"\"\"\n Allows the user to view one or more policies either together or individually, as defined in the CLI user prompt.\n Policies are viewed in the machine's default editor (both existing policies and newly generated policies).\n\n Args:\n view_policies (ArkDPAViewPolicies): _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n policy_names = view_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to view?',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n if not policy_names:\n return\n try:\n if view_policies.unified:\n inquirer.prompt(\n [inquirer.Editor('views', f'Show all selected {self._policies_family} policies')],\n answers={\n 'views': '\\n\\n\\n'.join(\n [f'# Policy [{policy_name}]\\n{workspace_policies[policy_name].json(indent=4)}' for policy_name in policy_names]\n )\n },\n render=ArkInquirerRender(),\n )\n else:\n inquirer.prompt(\n [inquirer.Editor(f'{policy_name}_view', f'Show [{policy_name}]') for policy_name in policy_names],\n render=ArkInquirerRender(),\n answers={f'{policy_name}_view': workspace_policies[policy_name].json(indent=4) for policy_name in policy_names},\n )\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to view the {self._policies_family} policies, '\n f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def reset_policies(self, reset_policy: ArkDPAResetPolicies) -> None:\n \"\"\"\n Resets local workspace policies.\n When all policies are reset, all local policies are overwritten and deleted policies are removed.\n Otherwise, the user can select which policies are reset.\n This function does not alter newly generated uncommitted policies.\n\n Args:\n reset_policy (ArkDPAResetPolicies): _description_\n \"\"\"\n if reset_policy.all:\n answers = inquirer.prompt(\n [inquirer.Confirm('reset', message=f'Are you sure you want to reset all edited {self._policies_family} policies?')]\n )\n if not answers:\n return\n if answers['reset']:\n self.load_policies(ArkDPALoadPolicies(override=True))\n else:\n policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n if not policies_diff and not removed_policies:\n return\n policy_names = reset_policy.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to reset?, press space to select',\n choices=[p for p in 
policies_diff.keys() + removed_policies.keys()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n policy_names = [p for p in policy_names if p in policies_diff or p in removed_policies]\n for policy_name in policy_names:\n policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')\n if policy_name in policies_diff:\n policy_path.write_text(policies_diff[policy_name][1].json(indent=4))\n elif policy_name in removed_policies:\n policy_path.write_text(removed_policies[policy_name].json(indent=4))\n (Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)\n\n def generate_policy(self, generate_policy: GeneratePolicyType) -> None:\n \"\"\"\n Generates a new policy from a template and the user's parameters.\n The user is prompted for the parameters when they are not specified in the CLI.\n After policy's parameters are defined, the policy is generates in memory and can bee edited.\n The new policy is saved locally until it is committed.\n\n Args:\n generate_policy (GeneratePolicyType): _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n policy = self._generate_policy(generate_policy, workspace_policies)\n policy_path = Path(self.__policies_cache_dir) / (policy.policy_name + '.json.generated')\n # Let the user edit the generated policy\n if not generate_policy.disable_edit:\n try:\n answers = inquirer.prompt(\n [\n inquirer.Editor(\n 'policy_editor',\n f'Newly {self._policies_family} policy is generated and ready to be edited, once edited, it will be saved to the local workspace',\n )\n ],\n render=ArkInquirerRender(),\n answers={'policy_editor': policy.json(indent=4, exclude_none=True)},\n )\n if not answers:\n return\n policy = self.__policy_type.parse_raw(answers['policy_editor'])\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to edit the {self._policies_family} policy, '\n f'the policy will be saved to [{policy_path}] and can be edited manually [{str(ex)}]'\n )\n policy_path.write_text(policy.json(indent=4))\n\n def policies_diff(self, policies_diff: ArkDPAPoliciesDiff) -> None:\n \"\"\"\n Calculates the diff between the local workspace and remote policies.\n This diff includes uncommitted removed policies. 
A unified or per policy diff can be displayed.\n\n Args:\n policies_diff (ArkDPAPoliciesDiff): _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n if not loaded_policies_diff and not removed_policies:\n return\n if policies_diff.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in policies_diff.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in policies_diff.names}\n if not loaded_policies_diff and not removed_policies:\n return\n diffs = {\n policy_name: difflib.unified_diff(\n policy_tuple[1].json(indent=4).splitlines(True),\n policy_tuple[0].json(indent=4).splitlines(True),\n fromfile=f'local policy [{policy_name}]',\n tofile=f'remote policy [{policy_name}]',\n n=MAX_LINE_DIFF,\n )\n for policy_name, policy_tuple in loaded_policies_diff.items()\n }\n diffs.update(\n {\n policy_name: difflib.unified_diff(\n policy.json(indent=4).splitlines(True),\n '',\n fromfile=f'local policy [{policy_name}]',\n tofile=f'remote policy [{policy_name}]',\n n=MAX_LINE_DIFF,\n )\n for policy_name, policy in removed_policies.items()\n }\n )\n try:\n if policies_diff.unified:\n inquirer.prompt(\n [inquirer.Editor('diffs', 'Show all diffs')],\n render=ArkInquirerRender(),\n answers={'diffs': '\\n\\n\\n'.join([''.join(d) for d in diffs.values()])},\n )\n else:\n inquirer.prompt(\n [inquirer.Editor(f'{policy_name}_diff', f'Show [{policy_name}] diff') for policy_name in diffs.keys()],\n render=ArkInquirerRender(),\n answers={f'{policy_name}_diff': ''.join(policy_diffs) for policy_name, policy_diffs in diffs.items()},\n )\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to show {self._policies_family} policies diff, '\n f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def policies_status(self, get_policies_status: ArkDPAGetPoliciesStatus) -> ArkDPAPoliciesStatus:\n \"\"\"\n Gets the status of locally altered policies.\n\n Args:\n get_policies_status (ArkDPAGetPoliciesStatus): _description_\n\n Returns:\n ArkDPAPoliciesStatus: _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n generated_policies = self.__load_generated_policies_from_workspace()\n if get_policies_status.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in get_policies_status.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in get_policies_status.names}\n generated_policies = {k: v for k, v in generated_policies.items() if k in get_policies_status.names}\n return ArkDPAPoliciesStatus(\n modified_policies=list(loaded_policies_diff.keys()),\n removed_policies=list(removed_policies.keys()),\n added_policies=list(generated_policies.keys()),\n )\n\n def commit_policies(self, commit_policies: ArkDPACommitPolicies) -> None:\n \"\"\"\n Commits policies.\n The function first calculates the differences between the local and remote policies to find out which policies were edited, including\n the policies selected for deletion and new, uncommitted policies. 
It also\n allows selecting whether to commit all the edited policies or only specific policies by name.\n\n After all policies are committed, the workspace is reorganized accordingly.\n\n Args:\n commit_policies (ArkDPACommitPolicies): _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n generated_policies = self.__load_generated_policies_from_workspace()\n if not loaded_policies_diff and not removed_policies and not generated_policies:\n return\n if commit_policies.all:\n answers = inquirer.prompt(\n [inquirer.Confirm('reset', message=f'Are you sure you want to commit all edited {self._policies_family} policies?')]\n )\n if not answers or not answers['reset']:\n return\n else:\n if commit_policies.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in commit_policies.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in commit_policies.names}\n generated_policies = {k: v for k, v in generated_policies.items() if k in commit_policies.names}\n else:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to commit?, press space to select',\n choices=list(loaded_policies_diff.keys()) + list(removed_policies.keys()) + list(generated_policies.keys()),\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in answers['names']}\n removed_policies = {k: v for k, v in removed_policies.items() if k in answers['names']}\n generated_policies = {k: v for k, v in generated_policies.items() if k in answers['names']}\n if not loaded_policies_diff and not removed_policies and not generated_policies:\n return\n with ThreadPoolExecutor() as executor:\n added = executor.map(lambda p: self._add_policy(self.__add_policy_type(**p.dict())), generated_policies.values())\n updated = executor.map(lambda p: self._update_policy(self.__update_policy_type(**p[0].dict())), loaded_policies_diff.values())\n deleted = executor.map(\n lambda p: self._delete_policy(ArkDPADeletePolicy(policy_id=p.policy_id, policy_name=p.policy_name)),\n removed_policies.values(),\n )\n # Loop for exception checking\n added_policies = list(added)\n for _ in itertools.chain(updated, deleted):\n pass\n for policy_name in removed_policies.keys():\n (Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)\n for policy_name in generated_policies.keys():\n for policy in added_policies:\n if policy.policy_name == policy_name:\n (Path(self.__policies_cache_dir) / (policy_name + '.json.generated')).rename(\n (Path(self.__policies_cache_dir) / (policy_name + '.json'))\n )\n (Path(self.__policies_cache_dir) / (policy_name + '.json')).write_text(policy.json(indent=4))" }, { "identifier": "ArkProfile", "path": "ark_sdk_python/models/ark_profile.py", "snippet": "class ArkProfile(ArkModel):\n profile_name: str = Field(default='ark', alias='Profile Name', description='Profile name for storage')\n profile_description: str = Field(default='Default Ark Profile', alias='Profile Description', description='Info about the profile')\n auth_profiles: Dict[str, ArkAuthProfile] = Field(\n description='Authentication profiles configurations, map from name of the authenticator to its profile', default_factory=dict\n )\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('auth_profiles', pre=True)\n def 
validate_auth_profiles(cls, val):\n auth_profiles = {}\n for k, v in val.items():\n auth_profile = ArkAuthProfile.parse_obj(v)\n # Make sure that the settings are parsed with the correct class\n # Due to properties overlapping\n if 'auth_method_settings' in v:\n auth_profile.auth_method_settings = ArkAuthMethodSettingsMap[auth_profile.auth_method].parse_obj(v['auth_method_settings'])\n auth_profiles[k] = auth_profile\n return auth_profiles" }, { "identifier": "ArkDPADBGeneratePolicy", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/db/ark_dpa_db_generate_policy.py", "snippet": "class ArkDPADBGeneratePolicy(ArkDPABaseGeneratePolicy):\n providers: Optional[Set[Literal['MySQL', 'MariaDB', 'Postgres', 'MSSQL', 'Oracle']]] = Field(\n description='Providers to generate the policy for'\n )" }, { "identifier": "ArkWorkspaceType", "path": "ark_sdk_python/models/common/ark_workspace_type.py", "snippet": "class ArkWorkspaceType(str, MultiValueEnum):\n AWS = 'aws', 'AWS', 'Aws'\n AZURE = 'azure', 'AZURE', 'Azure'\n ONPREM = 'onprem', 'ON-PREMISE', 'OnPrem'\n DB = 'db', 'DATABASES', 'Databases'\n GCP = 'gcp', 'GCP'\n MYSQL = 'mysql', 'MySQL'\n MARIADB = 'mariadb', 'MariaDB'\n MSSQL = 'mssql', 'MSSQL'\n ORACLE = 'oracle', 'Oracle'\n POSTGRES = 'postgres', 'Postgres'\n FAULT = 'fault', 'FAULT'\n UNKNOWN = 'unknown', 'UNKNOWN', 'Unknown'" }, { "identifier": "ArkServiceConfig", "path": "ark_sdk_python/models/services/ark_service_config.py", "snippet": "class ArkServiceConfig(ArkModel):\n service_name: str = Field(description='Name of the service')\n required_authenticator_names: List[str] = Field(description='Required authenticators for the service to properly work')\n optional_authenticator_names: List[str] = Field(\n description='Optional authenticators for the service for extra capabilities', default_factory=list\n )" }, { "identifier": "ArkDPADeletePolicy", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_delete_policy.py", "snippet": "class ArkDPADeletePolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to delete')\n policy_name: Optional[str] = Field(description='Policy name to delete')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values" }, { "identifier": "ArkDPAGetPolicy", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_get_policy.py", "snippet": "class ArkDPAGetPolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to get')\n policy_name: Optional[str] = Field(description='Policy name to get')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values" }, { "identifier": "ArkDPARuleStatus", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_rule_status.py", "snippet": "class ArkDPARuleStatus(str, Enum):\n Enabled = 'Enabled'\n Disabled = 'Disabled'\n Draft = 'Draft'\n Expired = 'Expired'" }, { "identifier": "ArkDPAUserData", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_user_data.py", "snippet": "class ArkDPAUserData(ArkCamelizedModel):\n roles: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Roles allowed for auth rule', 
default_factory=list)\n groups: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Groups allowed for auth rule', default_factory=list)\n users: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Users allowed for auth rule', default_factory=list)" }, { "identifier": "ArkDPADBAddPolicy", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_add_policy.py", "snippet": "class ArkDPADBAddPolicy(ArkDPABaseAddPolicy):\n providers_tags: List[str] = Field(description='Policy tags to use as filters for the assets in the rules', default_factory=list)\n providers_data: Optional[ArkDPADBProvidersData] = Field(\n description='Policy providers data containing database assets of different types'\n )\n user_access_rules: Optional[List[ArkDPADBAuthorizationRule]] = Field(\n description='Authorization rules of the policy describing how and who can access the assets'\n )" }, { "identifier": "ArkDPADBAuthorizationRule", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_authorization_rule.py", "snippet": "class ArkDPADBAuthorizationRule(ArkDPABaseAuthorizationRule):\n connection_information: ArkDPADBConnectionInformation = Field(description='Rule information on how access is made')" }, { "identifier": "ArkDPADBConnectionInformation", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_authorization_rule.py", "snippet": "class ArkDPADBConnectionInformation(ArkDPABaseConnectionInformation):\n connect_as: ArkDPADBConnectAs = Field(description='In which fashion the connection is made')" }, { "identifier": "ArkDPADBAppliedTo", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBAppliedTo(ArkCamelizedModel):\n name: str = Field(description='Name of the resource to apply the auth to')\n type: ArkDPADBResourceIdentifierType = Field(description='Type of the resource')" }, { "identifier": "ArkDPADBBaseAuth", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBBaseAuth(ArkCamelizedModel):\n pass" }, { "identifier": "ArkDPADBConnectAs", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBConnectAs(ArkCamelizedModel):\n ldap_auth: Optional[Union[ArkDPADBLDAPAuth, List[ArkDPADBLDAPAuth]]] = Field(\n description='LDAP related authentication, only applies to MSSQL DB'\n )\n db_auth: Optional[Union[ArkDPADBLocalDBAuth, List[ArkDPADBLocalDBAuth]]] = Field(\n description='Local DB related authentication, only applies to MySQL / MariaDB / Postgres'\n )\n oracle_auth: Optional[Union[ArkDPADBOracleDBAuth, List[ArkDPADBOracleDBAuth]]] = Field(\n description='Oracle DB related authentication, only applies to Oracle'\n )" }, { "identifier": "ArkDPADBLDAPAuth", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBLDAPAuth(ArkDPADBBaseAuth):\n assign_groups: List[str] = Field(description='LDAP groups to assign the ephemeral user to')\n applied_to: Optional[List[ArkDPADBAppliedTo]] = Field(description='Which resources to apply to')" }, { "identifier": "ArkDPADBLocalDBAuth", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBLocalDBAuth(ArkDPADBBaseAuth):\n roles: List[str] = Field(description='Local DB roles to assign the ephemeral user to')\n applied_to: Optional[List[ArkDPADBAppliedTo]] = Field(description='Which resources to apply to')" }, { 
"identifier": "ArkDPADBOracleDBAuth", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBOracleDBAuth(ArkDPADBBaseAuth):\n roles: List[str] = Field(description='Local DB roles to assign the ephemeral user to')\n applied_to: Optional[List[ArkDPADBAppliedTo]] = Field(description='Which resources to apply to')\n dba_role: bool = Field(description='Whether to apply to the ephemeral user the DBA role', default=False)\n sysdba_role: bool = Field(description='Whether to apply to the ephemeral user the SYSDBA role', default=False)\n sysoper_role: bool = Field(description='Whether to apply to the ephemeral user the SYSOPER role', default=False)" }, { "identifier": "ArkDPADBResourceIdentifierType", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBResourceIdentifierType(str, Enum):\n RESOURCE = 'resource'\n TAG = 'tag'" }, { "identifier": "ArkDPADBPolicy", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_policy.py", "snippet": "class ArkDPADBPolicy(ArkDPABasePolicy):\n providers_tags: List[str] = Field(description='Policy tags', default_factory=list)\n providers_data: ArkDPADBProvidersData = Field(description='Policy providers data')\n user_access_rules: Optional[List[ArkDPADBAuthorizationRule]] = Field(description='Authorization rules of the policy')" }, { "identifier": "ArkDPADBPolicyListItem", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_policy_list_item.py", "snippet": "class ArkDPADBPolicyListItem(ArkDPABasePolicyListItem):\n providers: Optional[List[ArkWorkspaceType]] = Field(description='Names of the database providers of the policy')\n providers_tags: List[str] = Field(description='Tags on the policy', default_factory=list)\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers')\n def validate_platforms(cls, val):\n if val is not None:\n for plat in val:\n if ArkWorkspaceType(plat) not in [\n ArkWorkspaceType.MYSQL,\n ArkWorkspaceType.MARIADB,\n ArkWorkspaceType.POSTGRES,\n ArkWorkspaceType.MSSQL,\n ArkWorkspaceType.ORACLE,\n ]:\n raise ValueError('Invalid Database Type')\n return val" }, { "identifier": "ArkDPADB", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADB(ArkCamelizedModel):\n pass" }, { "identifier": "ArkDPADBMariaDB", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBMariaDB(ArkDPADBIdentifiers):\n pass" }, { "identifier": "ArkDPADBMSSQL", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBMSSQL(ArkDPADBIdentifiers):\n pass" }, { "identifier": "ArkDPADBMySQL", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBMySQL(ArkDPADBIdentifiers):\n pass" }, { "identifier": "ArkDPADBOracle", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBOracle(ArkDPADB):\n resources: List[Union[str, ArkDPADBOracleResource]] = Field(description='List of oracle resources / assets for the policy')" }, { "identifier": "ArkDPADBOracleResource", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBOracleResource(ArkCamelizedModel):\n name: str = Field(description='Name of the oracle db resource / asset')\n services: Optional[List[str]] = Field(description='Oracle services in the 
database')" }, { "identifier": "ArkDPADBPostgres", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBPostgres(ArkDPADBIdentifiers):\n pass" }, { "identifier": "ArkDPADBProvidersData", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBProvidersData(ArkCamelizedModel):\n mssql: Optional[ArkDPADBMSSQL] = Field(description='MSSQL related resources')\n mysql: Optional[ArkDPADBMySQL] = Field(description='MySQL related resources')\n mariadb: Optional[ArkDPADBMariaDB] = Field(description='MariaDB related resources')\n postgres: Optional[ArkDPADBPostgres] = Field(description='PostgreSQL related resources')\n oracle: Optional[ArkDPADBOracle] = Field(description='Oracle related resources')\n\n @root_validator\n @classmethod\n def validate_min_providers(cls, data: Dict) -> Dict[str, Any]:\n if isinstance(data, dict):\n if all(value is None for value in data.values()):\n raise ValueError('policy should contain at least one provider')\n return data" }, { "identifier": "ArkDPADBUpdatePolicy", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_update_policy.py", "snippet": "class ArkDPADBUpdatePolicy(ArkDPABaseUpdatePolicy):\n providers_tags: Optional[List[str]] = Field(description='Policy tags to use as filters for the assets in the rules')\n providers_data: Optional[ArkDPADBProvidersData] = Field(\n description='Policy providers data containing database assets of different types'\n )\n user_access_rules: Optional[List[ArkDPADBAuthorizationRule]] = Field(\n description='Authorization rules of the policy describing how and who can access the assets'\n )" }, { "identifier": "ArkDPADBPoliciesService", "path": "ark_sdk_python/services/dpa/policies/db/ark_dpa_db_policies_service.py", "snippet": "class ArkDPADBPoliciesService(ArkService):\n def __init__(self, isp_auth: ArkISPAuth) -> None:\n super().__init__(isp_auth)\n self.__isp_auth = isp_auth\n self.__client: ArkISPServiceClient = ArkISPServiceClient.from_isp_auth(self.__isp_auth, 'dpa')\n\n @property\n def isp_client(self) -> ArkISPServiceClient:\n return self.__client\n\n def __policy_id_by_name(self, policy_name: str) -> str:\n policies = self.list_policies_by(ArkDPADBPoliciesFilter(name=policy_name))\n if not policies:\n raise ArkServiceException(f'Failed to find db policy id by name [{policy_name}]')\n return policies[0].policy_id\n\n def add_policy(self, add_policy: ArkDPADBAddPolicy) -> ArkDPADBPolicy:\n \"\"\"\n Adds a new DB policy with the specified information.\n\n Args:\n add_policy (ArkDPADBAddPolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPADBPolicy: _description_\n \"\"\"\n self._logger.info(f'Adding new db policy [{add_policy.policy_name}]')\n add_policy_dict = add_policy.dict(by_alias=True, exclude_none=True)\n resp: Response = self.__client.post(DB_POLICIES_API, json=add_policy_dict)\n if resp.status_code == HTTPStatus.CREATED:\n try:\n policy_id = resp.json()['policyId']\n return self.policy(ArkDPAGetPolicy(policy_id=policy_id))\n except (ValidationError, JSONDecodeError, KeyError) as ex:\n self._logger.exception(f'Failed to parse add db policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse add sb policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to add db policy [{resp.text}] - [{resp.status_code}]')\n\n def delete_policy(self, delete_policy: ArkDPADeletePolicy) -> None:\n \"\"\"\n Deletes the 
specified (ID or name) DB policy.\n\n Args:\n delete_policy (ArkDPADeletePolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n if delete_policy.policy_name and not delete_policy.policy_id:\n delete_policy.policy_id = self.__policy_id_by_name(delete_policy.policy_name)\n self._logger.info(f'Deleting db policy [{delete_policy.policy_id}]')\n resp: Response = self.__client.delete(DB_POLICY_API.format(policy_id=delete_policy.policy_id))\n if resp.status_code != HTTPStatus.NO_CONTENT:\n raise ArkServiceException(f'Failed to delete db policy [{resp.text}] - [{resp.status_code}]')\n\n def update_policy(self, update_policy: ArkDPADBUpdatePolicy) -> ArkDPADBPolicy:\n \"\"\"\n Updates a DB policy.\n\n Args:\n update_policy (ArkDPADBUpdatePolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPADBPolicy: _description_\n \"\"\"\n if update_policy.policy_name and not update_policy.policy_id:\n update_policy.policy_id = self.__policy_id_by_name(update_policy.policy_name)\n self._logger.info(f'Updating db policy [{update_policy.policy_id}]')\n update_dict = json.loads(update_policy.json(by_alias=True, exclude_none=True, exclude={'new_policy_name', 'policy_name'}))\n if update_policy.new_policy_name:\n update_dict['policyName'] = update_policy.new_policy_name\n else:\n update_dict['policyName'] = update_policy.policy_name\n resp: Response = self.__client.put(DB_POLICY_API.format(policy_id=update_policy.policy_id), json=update_dict)\n if resp.status_code == HTTPStatus.OK:\n try:\n return ArkDPADBPolicy.parse_obj(resp.json())\n except (ValidationError, JSONDecodeError) as ex:\n self._logger.exception(f'Failed to parse update db policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse update db policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to update db policy [{resp.text}] - [{resp.status_code}]')\n\n def update_policy_status(self, update_policy_status: ArkDPAUpdatePolicyStatus) -> ArkDPADBPolicy:\n \"\"\"\n Updates the status of the specified (by ID) DB policy.\n\n Args:\n update_policy_status (ArkDPAUpdatePolicyStatus): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPADBPolicy: _description_\n \"\"\"\n if update_policy_status.policy_name and not update_policy_status.policy_id:\n update_policy_status.policy_id = self.__policy_id_by_name(update_policy_status.policy_name)\n self._logger.info(f'Updating db policy status [{update_policy_status.policy_id}]')\n resp: Response = self.__client.put(\n DB_UPDATE_POLICY_STATUS_API.format(policy_id=update_policy_status.policy_id),\n json=update_policy_status.dict(exclude={'policy_id'}),\n )\n if resp.status_code == HTTPStatus.OK:\n return self.policy(ArkDPAGetPolicy(policy_id=update_policy_status.policy_id))\n raise ArkServiceException(f'Failed to update db policy status [{resp.text}] - [{resp.status_code}]')\n\n def list_policies(self) -> List[ArkDPADBPolicyListItem]:\n \"\"\"\n Lists all of the tenants's DB policies.\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n List[ArkDPADBPolicyListItem]: _description_\n \"\"\"\n self._logger.info('Retrieving all db policies')\n resp: Response = self.__client.get(DB_POLICIES_API)\n if resp.status_code == HTTPStatus.OK:\n try:\n return parse_obj_as(List[ArkDPADBPolicyListItem], resp.json()['items'])\n except (ValidationError, JSONDecodeError, KeyError) as ex:\n self._logger.exception(f'Failed to parse list db policies response [{str(ex)}] - 
[{resp.text}]')\n raise ArkServiceException(f'Failed to parse list db policies response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to list db policies [{resp.text}] - [{resp.status_code}]')\n\n def list_policies_by(self, policies_filter: ArkDPADBPoliciesFilter) -> List[ArkDPADBPolicyListItem]:\n \"\"\"\n Lists DB policies that match the specified filters.\n\n Args:\n policies_filter (ArkDPADBPoliciesFilter): _description_\n\n Returns:\n List[ArkDPADBPolicyListItem]: _description_\n \"\"\"\n self._logger.info(f'Retrieving db policies by filter [{policies_filter}]')\n policies = self.list_policies()\n\n # Filter by statuses\n if policies_filter.statuses:\n policies = [p for p in policies if p.status in policies_filter.statuses]\n\n # Filter by name wildcard\n if policies_filter.name:\n policies = [p for p in policies if fnmatch(p.policy_name, policies_filter.name)]\n\n # Filter by cloud providers\n if policies_filter.providers:\n policies = [p for p in policies if all(cp.value in p.providers for cp in policies_filter.providers)]\n\n return policies\n\n def policy(self, get_policy: ArkDPAGetPolicy) -> ArkDPADBPolicy:\n \"\"\"\n Retrieves a DB policy by ID.\n\n Args:\n get_policy (ArkDPAGetPolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPADBPolicy: _description_\n \"\"\"\n if get_policy.policy_name and not get_policy.policy_id:\n get_policy.policy_id = self.__policy_id_by_name(get_policy.policy_name)\n self._logger.info(f'Retrieving db policy [{get_policy.policy_id}]')\n resp: Response = self.__client.get(DB_POLICY_API.format(policy_id=get_policy.policy_id))\n if resp.status_code == HTTPStatus.OK:\n try:\n return ArkDPADBPolicy.parse_obj(resp.json())\n except (ValidationError, JSONDecodeError) as ex:\n self._logger.exception(f'Failed to parse db policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse db policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to retrieve db policy [{get_policy.policy_id}] [{resp.text}] - [{resp.status_code}]')\n\n def policies_stats(self) -> ArkDPADBPoliciesStats:\n \"\"\"\n Calculates policy statistics.\n\n Returns:\n ArkDPADBPoliciesStats: _description_\n \"\"\"\n self._logger.info('Calculating db policies stats')\n policies = self.list_policies()\n policies_stats = ArkDPADBPoliciesStats.construct()\n policies_stats.policies_count = len(policies)\n\n # Count policies per status\n status_types: Set[ArkDPARuleStatus] = {p.status for p in policies if p.status}\n policies_stats.policies_count_per_status = {st: len([p for p in policies if p.status and p.status == st]) for st in status_types}\n\n # Count policies per platforms\n policies_stats.policies_count_per_provider = {}\n for policy in policies:\n for platform in policy.providers:\n if platform not in policies_stats.policies_count_per_provider:\n policies_stats.policies_count_per_provider[platform] = 0\n policies_stats.policies_count_per_provider[platform] += 1\n\n return policies_stats\n\n @staticmethod\n @overrides\n def service_config() -> ArkServiceConfig:\n return SERVICE_CONFIG" } ]
from datetime import date, timedelta from typing import Dict, Final, List, Optional from overrides import overrides from ark_sdk_python.args.ark_args_formatter import ArkInquirerRender from ark_sdk_python.auth.ark_isp_auth import ArkISPAuth from ark_sdk_python.cli_services.dpa.common.ark_dpa_base_policies_editor_service import ArkDPABasePoliciesEditorService from ark_sdk_python.models.ark_profile import ArkProfile from ark_sdk_python.models.cli_services.dpa.policies_editor.db import ArkDPADBGeneratePolicy from ark_sdk_python.models.common import ArkWorkspaceType from ark_sdk_python.models.services import ArkServiceConfig from ark_sdk_python.models.services.dpa.policies.common import ArkDPADeletePolicy, ArkDPAGetPolicy, ArkDPARuleStatus, ArkDPAUserData from ark_sdk_python.models.services.dpa.policies.db import ( ArkDPADB, ArkDPADBAddPolicy, ArkDPADBAppliedTo, ArkDPADBAuthorizationRule, ArkDPADBBaseAuth, ArkDPADBConnectAs, ArkDPADBConnectionInformation, ArkDPADBLDAPAuth, ArkDPADBLocalDBAuth, ArkDPADBMariaDB, ArkDPADBMSSQL, ArkDPADBMySQL, ArkDPADBOracle, ArkDPADBOracleDBAuth, ArkDPADBOracleResource, ArkDPADBPolicy, ArkDPADBPolicyListItem, ArkDPADBPostgres, ArkDPADBProvidersData, ArkDPADBResourceIdentifierType, ArkDPADBUpdatePolicy, ) from ark_sdk_python.services.dpa.policies.db.ark_dpa_db_policies_service import ArkDPADBPoliciesService import inquirer
14687
SUPPORTED_DATABASE_TYPES: Final[List[str]] = [ 'MSSQL', 'MySQL', 'MariaDB', 'Postgres', 'Oracle', ] DEFAULT_GENERATED_POLICY: Final[ArkDPADBPolicy] = ArkDPADBPolicy( policy_name='Default DB Policy', status=ArkDPARuleStatus.Draft, description='Auto generated db policy', providers_data=ArkDPADBProvidersData( postgres=ArkDPADBPostgres( resources=['postgres-onboarded-asset'], ), ), start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPADB]] = { ArkWorkspaceType.MSSQL: ArkDPADBMSSQL(resources=['mssql-onboarded-asset']), ArkWorkspaceType.MYSQL: ArkDPADBMySQL(resources=['mysql-onboarded-asset']), ArkWorkspaceType.MARIADB: ArkDPADBMariaDB(resources=['mariadb-onboarded-asset']), ArkWorkspaceType.POSTGRES: ArkDPADBPostgres(resources=['postgres-onboarded-asset']), ArkWorkspaceType.ORACLE: ArkDPADBOracle( resources=[ ArkDPADBOracleResource( name='oracle-onboarded-asset', services=['XE'], ), ], ), } DEFAULT_GENERATED_AUTH_METHODS: Final[Dict[ArkWorkspaceType, ArkDPADBBaseAuth]] = { ArkWorkspaceType.MSSQL: ArkDPADBLDAPAuth( assign_groups=['DomainSQLAdmins'], applied_to=[ ArkDPADBAppliedTo( name='mssql-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.MYSQL: ArkDPADBLocalDBAuth( roles=['db_admin'], applied_to=[ ArkDPADBAppliedTo( name='mysql-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.MARIADB: ArkDPADBLocalDBAuth( roles=['db_admin'], applied_to=[ ArkDPADBAppliedTo( name='mariadb-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.POSTGRES: ArkDPADBLocalDBAuth( roles=['rds_superuser'], applied_to=[ ArkDPADBAppliedTo( name='postgres-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.ORACLE: ArkDPADBOracleDBAuth( roles=[], dba_role=True, sysdba_role=True, sysoper_role=False, applied_to=[ ArkDPADBAppliedTo( name='oracle-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), } DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPADBAuthorizationRule] = ArkDPADBAuthorizationRule( rule_name='Default DB Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPADBConnectionInformation( grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', connect_as=ArkDPADBConnectAs( db_auth=[ ArkDPADBLocalDBAuth( roles=['rds_superuser'], applied_to=[ ArkDPADBAppliedTo( name='postgres-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ], ), ), ) WORKSPACE_TO_PROVIDER_NAME: Final[Dict[ArkWorkspaceType, str]] = { ArkWorkspaceType.MSSQL: 'mssql', ArkWorkspaceType.MYSQL: 'mysql', ArkWorkspaceType.POSTGRES: 'postgres', ArkWorkspaceType.ORACLE: 'oracle', ArkWorkspaceType.MARIADB: 'mariadb', } class ArkDPADBPoliciesEditorService(
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-db-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) SUPPORTED_DATABASE_TYPES: Final[List[str]] = [ 'MSSQL', 'MySQL', 'MariaDB', 'Postgres', 'Oracle', ] DEFAULT_GENERATED_POLICY: Final[ArkDPADBPolicy] = ArkDPADBPolicy( policy_name='Default DB Policy', status=ArkDPARuleStatus.Draft, description='Auto generated db policy', providers_data=ArkDPADBProvidersData( postgres=ArkDPADBPostgres( resources=['postgres-onboarded-asset'], ), ), start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPADB]] = { ArkWorkspaceType.MSSQL: ArkDPADBMSSQL(resources=['mssql-onboarded-asset']), ArkWorkspaceType.MYSQL: ArkDPADBMySQL(resources=['mysql-onboarded-asset']), ArkWorkspaceType.MARIADB: ArkDPADBMariaDB(resources=['mariadb-onboarded-asset']), ArkWorkspaceType.POSTGRES: ArkDPADBPostgres(resources=['postgres-onboarded-asset']), ArkWorkspaceType.ORACLE: ArkDPADBOracle( resources=[ ArkDPADBOracleResource( name='oracle-onboarded-asset', services=['XE'], ), ], ), } DEFAULT_GENERATED_AUTH_METHODS: Final[Dict[ArkWorkspaceType, ArkDPADBBaseAuth]] = { ArkWorkspaceType.MSSQL: ArkDPADBLDAPAuth( assign_groups=['DomainSQLAdmins'], applied_to=[ ArkDPADBAppliedTo( name='mssql-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.MYSQL: ArkDPADBLocalDBAuth( roles=['db_admin'], applied_to=[ ArkDPADBAppliedTo( name='mysql-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.MARIADB: ArkDPADBLocalDBAuth( roles=['db_admin'], applied_to=[ ArkDPADBAppliedTo( name='mariadb-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.POSTGRES: ArkDPADBLocalDBAuth( roles=['rds_superuser'], applied_to=[ ArkDPADBAppliedTo( name='postgres-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.ORACLE: ArkDPADBOracleDBAuth( roles=[], dba_role=True, sysdba_role=True, sysoper_role=False, applied_to=[ ArkDPADBAppliedTo( name='oracle-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), } DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPADBAuthorizationRule] = ArkDPADBAuthorizationRule( rule_name='Default DB Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPADBConnectionInformation( grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', connect_as=ArkDPADBConnectAs( db_auth=[ ArkDPADBLocalDBAuth( roles=['rds_superuser'], applied_to=[ ArkDPADBAppliedTo( name='postgres-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ], ), ), ) WORKSPACE_TO_PROVIDER_NAME: Final[Dict[ArkWorkspaceType, str]] = { ArkWorkspaceType.MSSQL: 'mssql', ArkWorkspaceType.MYSQL: 'mysql', ArkWorkspaceType.POSTGRES: 'postgres', ArkWorkspaceType.ORACLE: 'oracle', ArkWorkspaceType.MARIADB: 'mariadb', } class ArkDPADBPoliciesEditorService(
ArkDPABasePoliciesEditorService[ArkDPADBPolicy, ArkDPADBPolicyListItem, ArkDPADBAddPolicy, ArkDPADBUpdatePolicy, ArkDPADBGeneratePolicy]
22
2023-11-13 09:24:31+00:00
24k
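The first record ends here; before the next record begins, a brief orientation note follows. The embedded ArkDPADBPoliciesService snippet is easiest to read alongside a usage sketch, so a minimal, hypothetical one is given below. It is not part of the dataset record: the ArkISPAuth instance is assumed to have been authenticated elsewhere, and only the constructor and methods visible in the snippet (list_policies, policy) are exercised.

from ark_sdk_python.auth.ark_isp_auth import ArkISPAuth
from ark_sdk_python.models.services.dpa.policies.common import ArkDPAGetPolicy
from ark_sdk_python.services.dpa.policies.db.ark_dpa_db_policies_service import ArkDPADBPoliciesService


def print_db_policies(isp_auth: ArkISPAuth) -> None:
    # Assumes isp_auth was created and authenticated beforehand (not shown in the record).
    service = ArkDPADBPoliciesService(isp_auth)
    # list_policies() returns List[ArkDPADBPolicyListItem], per the snippet above.
    for item in service.list_policies():
        # Fetch the full policy by id and print two fields defined on ArkDPADBPolicy.
        policy = service.policy(ArkDPAGetPolicy(policy_id=item.policy_id))
        print(policy.policy_name, policy.providers_tags)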
i-super/Saleor
saleor/graphql/payment/tests/mutations/test_payment_method_process_tokenization.py
[ { "identifier": "PaymentMethodProcessTokenizationRequestData", "path": "saleor/payment/interface.py", "snippet": "class PaymentMethodProcessTokenizationRequestData(\n PaymentMethodTokenizationBaseRequestData\n):\n \"\"\"Dataclass for storing the request information for payment app.\"\"\"\n\n id: str" }, { "identifier": "PaymentMethodTokenizationResponseData", "path": "saleor/payment/interface.py", "snippet": "class PaymentMethodTokenizationResponseData(PaymentMethodTokenizationBaseResponseData):\n \"\"\"Dataclass for storing the response information from payment app.\"\"\"\n\n result: PaymentMethodTokenizationResult\n id: Optional[str] = None" }, { "identifier": "PaymentMethodTokenizationResult", "path": "saleor/payment/interface.py", "snippet": "class PaymentMethodTokenizationResult(str, Enum):\n \"\"\"Result of tokenization of payment method.\n\n SUCCESSFULLY_TOKENIZED - The payment method was successfully tokenized.\n ADDITIONAL_ACTION_REQUIRED - The additional action is required to tokenize payment\n method.\n PENDING - The payment method is pending tokenization.\n FAILED_TO_TOKENIZE - The payment method was not tokenized.\n FAILED_TO_DELIVER - The request to tokenize payment method was not delivered.\n \"\"\"\n\n SUCCESSFULLY_TOKENIZED = \"successfully_tokenized\"\n PENDING = \"pending\"\n ADDITIONAL_ACTION_REQUIRED = \"additional_action_required\"\n FAILED_TO_TOKENIZE = \"failed_to_tokenize\"\n FAILED_TO_DELIVER = \"failed_to_deliver\"" }, { "identifier": "PluginsManager", "path": "saleor/plugins/manager.py", "snippet": "class PluginsManager(PaymentInterface):\n \"\"\"Base manager for handling plugins logic.\"\"\"\n\n plugins_per_channel: dict[str, list[\"BasePlugin\"]] = {}\n global_plugins: list[\"BasePlugin\"] = []\n all_plugins: list[\"BasePlugin\"] = []\n\n @property\n def database(self):\n return (\n settings.DATABASE_CONNECTION_REPLICA_NAME\n if self._allow_replica\n else settings.DATABASE_CONNECTION_DEFAULT_NAME\n )\n\n def _load_plugin(\n self,\n PluginClass: type[\"BasePlugin\"],\n db_configs_map: dict,\n channel: Optional[\"Channel\"] = None,\n requestor_getter=None,\n allow_replica=True,\n ) -> \"BasePlugin\":\n db_config = None\n if PluginClass.PLUGIN_ID in db_configs_map:\n db_config = db_configs_map[PluginClass.PLUGIN_ID]\n plugin_config = db_config.configuration\n active = db_config.active\n channel = db_config.channel\n else:\n plugin_config = PluginClass.DEFAULT_CONFIGURATION\n active = PluginClass.get_default_active()\n\n return PluginClass(\n configuration=plugin_config,\n active=active,\n channel=channel,\n requestor_getter=requestor_getter,\n db_config=db_config,\n allow_replica=allow_replica,\n )\n\n def __init__(self, plugins: list[str], requestor_getter=None, allow_replica=True):\n with opentracing.global_tracer().start_active_span(\"PluginsManager.__init__\"):\n self._allow_replica = allow_replica\n self.all_plugins = []\n self.global_plugins = []\n self.plugins_per_channel = defaultdict(list)\n\n channel_map = self._get_channel_map()\n global_db_configs, channel_db_configs = self._get_db_plugin_configs(\n channel_map\n )\n\n for plugin_path in plugins:\n with opentracing.global_tracer().start_active_span(f\"{plugin_path}\"):\n PluginClass = import_string(plugin_path)\n if not getattr(PluginClass, \"CONFIGURATION_PER_CHANNEL\", False):\n plugin = self._load_plugin(\n PluginClass,\n global_db_configs,\n requestor_getter=requestor_getter,\n allow_replica=allow_replica,\n )\n self.global_plugins.append(plugin)\n self.all_plugins.append(plugin)\n else:\n for 
channel in channel_map.values():\n channel_configs = channel_db_configs.get(channel, {})\n plugin = self._load_plugin(\n PluginClass,\n channel_configs,\n channel,\n requestor_getter,\n allow_replica,\n )\n self.plugins_per_channel[channel.slug].append(plugin)\n self.all_plugins.append(plugin)\n\n for channel in channel_map.values():\n self.plugins_per_channel[channel.slug].extend(self.global_plugins)\n\n def _get_db_plugin_configs(self, channel_map):\n with opentracing.global_tracer().start_active_span(\"_get_db_plugin_configs\"):\n plugin_manager_configs = PluginConfiguration.objects.using(\n self.database\n ).all()\n channel_configs: defaultdict[Channel, dict] = defaultdict(dict)\n global_configs = {}\n for db_plugin_config in plugin_manager_configs.iterator():\n channel = channel_map.get(db_plugin_config.channel_id)\n if channel is None:\n global_configs[db_plugin_config.identifier] = db_plugin_config\n else:\n db_plugin_config.channel = channel\n channel_configs[channel][\n db_plugin_config.identifier\n ] = db_plugin_config\n\n return global_configs, channel_configs\n\n def __run_method_on_plugins(\n self,\n method_name: str,\n default_value: Any,\n *args,\n channel_slug: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"Try to run a method with the given name on each declared active plugin.\"\"\"\n value = default_value\n plugins = self.get_plugins(channel_slug=channel_slug, active_only=True)\n for plugin in plugins:\n value = self.__run_method_on_single_plugin(\n plugin, method_name, value, *args, **kwargs\n )\n return value\n\n def __run_method_on_single_plugin(\n self,\n plugin: Optional[\"BasePlugin\"],\n method_name: str,\n previous_value: Any,\n *args,\n **kwargs,\n ) -> Any:\n \"\"\"Run method_name on plugin.\n\n Method will return value returned from plugin's\n method. 
If plugin doesn't have own implementation of expected method_name, it\n will return previous_value.\n \"\"\"\n plugin_method = getattr(plugin, method_name, NotImplemented)\n if plugin_method == NotImplemented:\n return previous_value\n returned_value = plugin_method(*args, **kwargs, previous_value=previous_value) # type:ignore\n if returned_value == NotImplemented:\n return previous_value\n return returned_value\n\n def check_payment_balance(self, details: dict, channel_slug: str) -> dict:\n return self.__run_method_on_plugins(\n \"check_payment_balance\", None, details, channel_slug=channel_slug\n )\n\n def change_user_address(\n self,\n address: \"Address\",\n address_type: Optional[str],\n user: Optional[\"User\"],\n save: bool = True,\n ) -> \"Address\":\n default_value = address\n return self.__run_method_on_plugins(\n \"change_user_address\", default_value, address, address_type, user, save\n )\n\n def calculate_checkout_total(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n address: Optional[\"Address\"],\n ) -> TaxedMoney:\n currency = checkout_info.checkout.currency\n\n default_value = base_calculations.checkout_total(\n checkout_info,\n lines,\n )\n taxed_default_value = TaxedMoney(net=default_value, gross=default_value)\n\n if default_value <= zero_money(currency):\n return quantize_price(\n taxed_default_value,\n currency,\n )\n\n return quantize_price(\n self.__run_method_on_plugins(\n \"calculate_checkout_total\",\n taxed_default_value,\n checkout_info,\n lines,\n address,\n channel_slug=checkout_info.channel.slug,\n ),\n currency,\n )\n\n def calculate_checkout_subtotal(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n address: Optional[\"Address\"],\n ) -> TaxedMoney:\n line_totals = [\n self.calculate_checkout_line_total(\n checkout_info,\n lines,\n line_info,\n address,\n )\n for line_info in lines\n ]\n currency = checkout_info.checkout.currency\n total = sum(line_totals, zero_taxed_money(currency))\n return quantize_price(\n total,\n currency,\n )\n\n def calculate_checkout_shipping(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n address: Optional[\"Address\"],\n ) -> TaxedMoney:\n price = base_calculations.base_checkout_delivery_price(checkout_info, lines)\n default_value = TaxedMoney(price, price)\n return quantize_price(\n self.__run_method_on_plugins(\n \"calculate_checkout_shipping\",\n default_value,\n checkout_info,\n lines,\n address,\n channel_slug=checkout_info.channel.slug,\n ),\n checkout_info.checkout.currency,\n )\n\n def calculate_order_total(\n self,\n order: \"Order\",\n lines: Iterable[\"OrderLine\"],\n ) -> TaxedMoney:\n currency = order.currency\n default_value = base_order_calculations.base_order_total(order, lines)\n default_value = TaxedMoney(default_value, default_value)\n if default_value <= zero_taxed_money(currency):\n return quantize_price(\n default_value,\n currency,\n )\n\n return quantize_price(\n self.__run_method_on_plugins(\n \"calculate_order_total\",\n default_value,\n order,\n lines,\n channel_slug=order.channel.slug,\n ),\n currency,\n )\n\n def calculate_order_shipping(self, order: \"Order\") -> TaxedMoney:\n shipping_price = order.base_shipping_price\n default_value = quantize_price(\n TaxedMoney(net=shipping_price, gross=shipping_price),\n shipping_price.currency,\n )\n return quantize_price(\n self.__run_method_on_plugins(\n \"calculate_order_shipping\",\n default_value,\n order,\n channel_slug=order.channel.slug,\n 
),\n order.currency,\n )\n\n def get_checkout_shipping_tax_rate(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n address: Optional[\"Address\"],\n shipping_price: TaxedMoney,\n ):\n default_value = calculate_tax_rate(shipping_price)\n return self.__run_method_on_plugins(\n \"get_checkout_shipping_tax_rate\",\n default_value,\n checkout_info,\n lines,\n address,\n channel_slug=checkout_info.channel.slug,\n ).quantize(Decimal(\".0001\"))\n\n def get_order_shipping_tax_rate(self, order: \"Order\", shipping_price: TaxedMoney):\n default_value = calculate_tax_rate(shipping_price)\n return self.__run_method_on_plugins(\n \"get_order_shipping_tax_rate\",\n default_value,\n order,\n channel_slug=order.channel.slug,\n ).quantize(Decimal(\".0001\"))\n\n def calculate_checkout_line_total(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n checkout_line_info: \"CheckoutLineInfo\",\n address: Optional[\"Address\"],\n ) -> TaxedMoney:\n default_value = base_calculations.calculate_base_line_total_price(\n checkout_line_info,\n checkout_info.channel,\n )\n # apply entire order discount\n default_value = base_calculations.apply_checkout_discount_on_checkout_line(\n checkout_info,\n lines,\n checkout_line_info,\n default_value,\n )\n default_value = quantize_price(default_value, checkout_info.checkout.currency)\n default_taxed_value = TaxedMoney(net=default_value, gross=default_value)\n line_total = self.__run_method_on_plugins(\n \"calculate_checkout_line_total\",\n default_taxed_value,\n checkout_info,\n lines,\n checkout_line_info,\n address,\n channel_slug=checkout_info.channel.slug,\n )\n\n return quantize_price(line_total, checkout_info.checkout.currency)\n\n def calculate_order_line_total(\n self,\n order: \"Order\",\n order_line: \"OrderLine\",\n variant: \"ProductVariant\",\n product: \"Product\",\n ) -> OrderTaxedPricesData:\n default_value = base_order_calculations.base_order_line_total(order_line)\n currency = order_line.currency\n\n line_total = self.__run_method_on_plugins(\n \"calculate_order_line_total\",\n default_value,\n order,\n order_line,\n variant,\n product,\n channel_slug=order.channel.slug,\n )\n\n line_total.price_with_discounts = quantize_price(\n line_total.price_with_discounts, currency\n )\n line_total.undiscounted_price = quantize_price(\n line_total.undiscounted_price, currency\n )\n return line_total\n\n def calculate_checkout_line_unit_price(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n checkout_line_info: \"CheckoutLineInfo\",\n address: Optional[\"Address\"],\n ) -> TaxedMoney:\n quantity = checkout_line_info.line.quantity\n default_value = base_calculations.calculate_base_line_unit_price(\n checkout_line_info, checkout_info.channel\n )\n # apply entire order discount\n total_value = base_calculations.apply_checkout_discount_on_checkout_line(\n checkout_info,\n lines,\n checkout_line_info,\n default_value * quantity,\n )\n default_taxed_value = TaxedMoney(\n net=total_value / quantity, gross=default_value\n )\n unit_price = self.__run_method_on_plugins(\n \"calculate_checkout_line_unit_price\",\n default_taxed_value,\n checkout_info,\n lines,\n checkout_line_info,\n address,\n channel_slug=checkout_info.channel.slug,\n )\n return quantize_price(unit_price, checkout_info.checkout.currency)\n\n def calculate_order_line_unit(\n self,\n order: \"Order\",\n order_line: \"OrderLine\",\n variant: \"ProductVariant\",\n product: \"Product\",\n ) -> 
OrderTaxedPricesData:\n default_value = OrderTaxedPricesData(\n undiscounted_price=TaxedMoney(\n order_line.undiscounted_base_unit_price,\n order_line.undiscounted_base_unit_price,\n ),\n price_with_discounts=TaxedMoney(\n order_line.base_unit_price,\n order_line.base_unit_price,\n ),\n )\n currency = order_line.currency\n line_unit = self.__run_method_on_plugins(\n \"calculate_order_line_unit\",\n default_value,\n order,\n order_line,\n variant,\n product,\n channel_slug=order.channel.slug,\n )\n line_unit.price_with_discounts = quantize_price(\n line_unit.price_with_discounts, currency\n )\n line_unit.undiscounted_price = quantize_price(\n line_unit.undiscounted_price, currency\n )\n return line_unit\n\n def get_checkout_line_tax_rate(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n checkout_line_info: \"CheckoutLineInfo\",\n address: Optional[\"Address\"],\n price: TaxedMoney,\n ) -> Decimal:\n default_value = calculate_tax_rate(price)\n return self.__run_method_on_plugins(\n \"get_checkout_line_tax_rate\",\n default_value,\n checkout_info,\n lines,\n checkout_line_info,\n address,\n channel_slug=checkout_info.channel.slug,\n ).quantize(Decimal(\".0001\"))\n\n def get_order_line_tax_rate(\n self,\n order: \"Order\",\n product: \"Product\",\n variant: \"ProductVariant\",\n address: Optional[\"Address\"],\n unit_price: TaxedMoney,\n ) -> Decimal:\n default_value = calculate_tax_rate(unit_price)\n return self.__run_method_on_plugins(\n \"get_order_line_tax_rate\",\n default_value,\n order,\n product,\n variant,\n address,\n channel_slug=order.channel.slug,\n ).quantize(Decimal(\".0001\"))\n\n def get_tax_rate_type_choices(self) -> list[TaxType]:\n default_value: list = []\n return self.__run_method_on_plugins(\"get_tax_rate_type_choices\", default_value)\n\n def show_taxes_on_storefront(self) -> bool:\n default_value = False\n return self.__run_method_on_plugins(\"show_taxes_on_storefront\", default_value)\n\n def get_taxes_for_checkout(self, checkout_info, lines) -> Optional[TaxData]:\n return self.__run_plugin_method_until_first_success(\n \"get_taxes_for_checkout\",\n checkout_info,\n lines,\n channel_slug=checkout_info.channel.slug,\n )\n\n def get_taxes_for_order(self, order: \"Order\") -> Optional[TaxData]:\n return self.__run_plugin_method_until_first_success(\n \"get_taxes_for_order\", order, channel_slug=order.channel.slug\n )\n\n def preprocess_order_creation(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Optional[Iterable[\"CheckoutLineInfo\"]] = None,\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"preprocess_order_creation\",\n default_value,\n checkout_info,\n lines,\n channel_slug=checkout_info.channel.slug,\n )\n\n def customer_created(self, customer: \"User\"):\n default_value = None\n return self.__run_method_on_plugins(\"customer_created\", default_value, customer)\n\n def customer_deleted(self, customer: \"User\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"customer_deleted\", default_value, customer, webhooks=webhooks\n )\n\n def customer_updated(self, customer: \"User\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"customer_updated\", default_value, customer, webhooks=webhooks\n )\n\n def customer_metadata_updated(self, customer: \"User\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"customer_metadata_updated\", default_value, customer, webhooks=webhooks\n )\n\n def collection_created(self, 
collection: \"Collection\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"collection_created\", default_value, collection\n )\n\n def collection_updated(self, collection: \"Collection\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"collection_updated\", default_value, collection\n )\n\n def collection_deleted(self, collection: \"Collection\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"collection_deleted\", default_value, collection, webhooks=webhooks\n )\n\n def collection_metadata_updated(self, collection: \"Collection\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"collection_metadata_updated\", default_value, collection\n )\n\n def product_created(self, product: \"Product\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_created\", default_value, product, webhooks=webhooks\n )\n\n def product_updated(self, product: \"Product\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_updated\", default_value, product, webhooks=webhooks\n )\n\n def product_deleted(self, product: \"Product\", variants: list[int], webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_deleted\", default_value, product, variants, webhooks=webhooks\n )\n\n def product_media_created(self, media: \"ProductMedia\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_media_created\", default_value, media\n )\n\n def product_media_updated(self, media: \"ProductMedia\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_media_updated\", default_value, media\n )\n\n def product_media_deleted(self, media: \"ProductMedia\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_media_deleted\", default_value, media\n )\n\n def product_metadata_updated(self, product: \"Product\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_metadata_updated\", default_value, product\n )\n\n def product_variant_created(self, product_variant: \"ProductVariant\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_variant_created\", default_value, product_variant, webhooks=webhooks\n )\n\n def product_variant_updated(self, product_variant: \"ProductVariant\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_variant_updated\", default_value, product_variant, webhooks=webhooks\n )\n\n def product_variant_deleted(self, product_variant: \"ProductVariant\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_variant_deleted\", default_value, product_variant, webhooks=webhooks\n )\n\n def product_variant_out_of_stock(self, stock: \"Stock\", webhooks=None):\n default_value = None\n self.__run_method_on_plugins(\n \"product_variant_out_of_stock\", default_value, stock, webhooks=webhooks\n )\n\n def product_variant_back_in_stock(self, stock: \"Stock\", webhooks=None):\n default_value = None\n self.__run_method_on_plugins(\n \"product_variant_back_in_stock\", default_value, stock, webhooks=webhooks\n )\n\n def product_variant_stock_updated(self, stock: \"Stock\", webhooks=None):\n default_value = None\n self.__run_method_on_plugins(\n \"product_variant_stock_updated\", default_value, stock, webhooks=webhooks\n )\n\n def product_variant_metadata_updated(self, product_variant: \"ProductVariant\"):\n default_value = None\n 
self.__run_method_on_plugins(\n \"product_variant_metadata_updated\", default_value, product_variant\n )\n\n def product_export_completed(self, export: \"ExportFile\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_export_completed\", default_value, export\n )\n\n def order_created(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_created\", default_value, order, channel_slug=order.channel.slug\n )\n\n def event_delivery_retry(self, event_delivery: \"EventDelivery\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"event_delivery_retry\", default_value, event_delivery\n )\n\n def order_confirmed(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_confirmed\", default_value, order, channel_slug=order.channel.slug\n )\n\n def draft_order_created(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"draft_order_created\", default_value, order, channel_slug=order.channel.slug\n )\n\n def draft_order_updated(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"draft_order_updated\", default_value, order, channel_slug=order.channel.slug\n )\n\n def draft_order_deleted(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"draft_order_deleted\", default_value, order, channel_slug=order.channel.slug\n )\n\n def sale_created(self, sale: \"Promotion\", current_catalogue):\n default_value = None\n return self.__run_method_on_plugins(\n \"sale_created\", default_value, sale, current_catalogue\n )\n\n def sale_deleted(self, sale: \"Promotion\", previous_catalogue, webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"sale_deleted\", default_value, sale, previous_catalogue, webhooks=webhooks\n )\n\n def sale_updated(self, sale: \"Promotion\", previous_catalogue, current_catalogue):\n default_value = None\n return self.__run_method_on_plugins(\n \"sale_updated\", default_value, sale, previous_catalogue, current_catalogue\n )\n\n def sale_toggle(self, sale: \"Promotion\", catalogue):\n default_value = None\n return self.__run_method_on_plugins(\n \"sale_toggle\", default_value, sale, catalogue\n )\n\n def promotion_created(self, promotion: \"Promotion\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"promotion_created\", default_value, promotion\n )\n\n def promotion_updated(self, promotion: \"Promotion\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"promotion_updated\", default_value, promotion\n )\n\n def promotion_deleted(self, promotion: \"Promotion\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"promotion_deleted\", default_value, promotion, webhooks=webhooks\n )\n\n def promotion_started(self, promotion: \"Promotion\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"promotion_started\", default_value, promotion\n )\n\n def promotion_ended(self, promotion: \"Promotion\"):\n default_value = None\n return self.__run_method_on_plugins(\"promotion_ended\", default_value, promotion)\n\n def promotion_rule_created(self, promotion_rule: \"PromotionRule\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"promotion_rule_created\", default_value, promotion_rule\n )\n\n def promotion_rule_updated(self, promotion_rule: \"PromotionRule\"):\n default_value = None\n return self.__run_method_on_plugins(\n 
\"promotion_rule_updated\", default_value, promotion_rule\n )\n\n def promotion_rule_deleted(self, promotion_rule: \"PromotionRule\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"promotion_rule_deleted\", default_value, promotion_rule\n )\n\n def invoice_request(\n self, order: \"Order\", invoice: \"Invoice\", number: Optional[str]\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"invoice_request\",\n default_value,\n order,\n invoice,\n number,\n channel_slug=order.channel.slug,\n )\n\n def invoice_delete(self, invoice: \"Invoice\"):\n default_value = None\n channel_slug = invoice.order.channel.slug if invoice.order else None\n return self.__run_method_on_plugins(\n \"invoice_delete\",\n default_value,\n invoice,\n channel_slug=channel_slug,\n )\n\n def invoice_sent(self, invoice: \"Invoice\", email: str):\n default_value = None\n channel_slug = invoice.order.channel.slug if invoice.order else None\n return self.__run_method_on_plugins(\n \"invoice_sent\",\n default_value,\n invoice,\n email,\n channel_slug=channel_slug,\n )\n\n def order_fully_paid(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_fully_paid\", default_value, order, channel_slug=order.channel.slug\n )\n\n def order_paid(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_paid\", default_value, order, channel_slug=order.channel.slug\n )\n\n def order_fully_refunded(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_fully_refunded\",\n default_value,\n order,\n channel_slug=order.channel.slug,\n )\n\n def order_refunded(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_refunded\", default_value, order, channel_slug=order.channel.slug\n )\n\n def order_updated(self, order: \"Order\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_updated\",\n default_value,\n order,\n channel_slug=order.channel.slug,\n webhooks=webhooks,\n )\n\n def order_cancelled(self, order: \"Order\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_cancelled\",\n default_value,\n order,\n channel_slug=order.channel.slug,\n webhooks=webhooks,\n )\n\n def order_expired(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_expired\", default_value, order, channel_slug=order.channel.slug\n )\n\n def order_fulfilled(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_fulfilled\", default_value, order, channel_slug=order.channel.slug\n )\n\n def order_metadata_updated(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_metadata_updated\", default_value, order\n )\n\n def order_bulk_created(self, orders: list[\"Order\"]):\n default_value = None\n return self.__run_method_on_plugins(\"order_bulk_created\", default_value, orders)\n\n def fulfillment_created(\n self, fulfillment: \"Fulfillment\", notify_customer: Optional[bool] = True\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"fulfillment_created\",\n default_value,\n fulfillment,\n channel_slug=fulfillment.order.channel.slug,\n notify_customer=notify_customer,\n )\n\n def fulfillment_canceled(self, fulfillment: \"Fulfillment\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"fulfillment_canceled\",\n default_value,\n fulfillment,\n 
channel_slug=fulfillment.order.channel.slug,\n )\n\n def fulfillment_approved(\n self, fulfillment: \"Fulfillment\", notify_customer: Optional[bool] = True\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"fulfillment_approved\",\n default_value,\n fulfillment,\n channel_slug=fulfillment.order.channel.slug,\n notify_customer=notify_customer,\n )\n\n def fulfillment_metadata_updated(self, fulfillment: \"Fulfillment\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"fulfillment_metadata_updated\", default_value, fulfillment\n )\n\n def tracking_number_updated(self, fulfillment: \"Fulfillment\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"tracking_number_updated\",\n default_value,\n fulfillment,\n channel_slug=fulfillment.order.channel.slug,\n )\n\n def checkout_created(self, checkout: \"Checkout\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"checkout_created\",\n default_value,\n checkout,\n channel_slug=checkout.channel.slug,\n )\n\n def checkout_updated(self, checkout: \"Checkout\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"checkout_updated\",\n default_value,\n checkout,\n channel_slug=checkout.channel.slug,\n )\n\n def checkout_fully_paid(self, checkout: \"Checkout\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"checkout_fully_paid\",\n default_value,\n checkout,\n channel_slug=checkout.channel.slug,\n )\n\n def checkout_metadata_updated(self, checkout: \"Checkout\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"checkout_metadata_updated\", default_value, checkout\n )\n\n def page_created(self, page: \"Page\"):\n default_value = None\n return self.__run_method_on_plugins(\"page_created\", default_value, page)\n\n def page_updated(self, page: \"Page\"):\n default_value = None\n return self.__run_method_on_plugins(\"page_updated\", default_value, page)\n\n def page_deleted(self, page: \"Page\"):\n default_value = None\n return self.__run_method_on_plugins(\"page_deleted\", default_value, page)\n\n def page_type_created(self, page_type: \"PageType\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"page_type_created\", default_value, page_type\n )\n\n def page_type_updated(self, page_type: \"PageType\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"page_type_updated\", default_value, page_type\n )\n\n def page_type_deleted(self, page_type: \"PageType\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"page_type_deleted\", default_value, page_type\n )\n\n def permission_group_created(self, group: \"Group\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"permission_group_created\", default_value, group\n )\n\n def permission_group_updated(self, group: \"Group\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"permission_group_updated\", default_value, group\n )\n\n def permission_group_deleted(self, group: \"Group\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"permission_group_deleted\", default_value, group\n )\n\n def transaction_charge_requested(\n self, payment_data: \"TransactionActionData\", channel_slug: str\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"transaction_charge_requested\",\n default_value,\n payment_data,\n channel_slug=channel_slug,\n )\n\n def transaction_refund_requested(\n self, payment_data: \"TransactionActionData\", channel_slug: str\n ):\n default_value = None\n return 
self.__run_method_on_plugins(\n \"transaction_refund_requested\",\n default_value,\n payment_data,\n channel_slug=channel_slug,\n )\n\n def transaction_cancelation_requested(\n self, payment_data: \"TransactionActionData\", channel_slug: str\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"transaction_cancelation_requested\",\n default_value,\n payment_data,\n channel_slug=channel_slug,\n )\n\n def payment_gateway_initialize_session(\n self,\n amount: Decimal,\n payment_gateways: Optional[list[\"PaymentGatewayData\"]],\n source_object: Union[\"Order\", \"Checkout\"],\n ) -> list[\"PaymentGatewayData\"]:\n default_value = None\n return self.__run_method_on_plugins(\n \"payment_gateway_initialize_session\",\n default_value,\n amount,\n payment_gateways,\n source_object,\n channel_slug=source_object.channel.slug,\n )\n\n def transaction_initialize_session(\n self,\n transaction_session_data: \"TransactionSessionData\",\n ) -> \"TransactionSessionResult\":\n default_value = None\n return self.__run_method_on_plugins(\n \"transaction_initialize_session\",\n default_value,\n transaction_session_data,\n channel_slug=transaction_session_data.source_object.channel.slug,\n )\n\n def transaction_process_session(\n self,\n transaction_session_data: \"TransactionSessionData\",\n ) -> \"TransactionSessionResult\":\n default_value = None\n return self.__run_method_on_plugins(\n \"transaction_process_session\",\n default_value,\n transaction_session_data,\n channel_slug=transaction_session_data.source_object.channel.slug,\n )\n\n def transaction_item_metadata_updated(self, transaction_item: \"TransactionItem\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"transaction_item_metadata_updated\", default_value, transaction_item\n )\n\n def account_confirmed(self, user: \"User\"):\n default_value = None\n return self.__run_method_on_plugins(\"account_confirmed\", default_value, user)\n\n def account_confirmation_requested(\n self, user: \"User\", channel_slug: str, token: str, redirect_url: Optional[str]\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"account_confirmation_requested\",\n default_value,\n user,\n channel_slug,\n token=token,\n redirect_url=redirect_url,\n )\n\n def account_change_email_requested(\n self,\n user: \"User\",\n channel_slug: str,\n token: str,\n redirect_url: str,\n new_email: str,\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"account_change_email_requested\",\n default_value,\n user,\n channel_slug,\n token=token,\n redirect_url=redirect_url,\n new_email=new_email,\n )\n\n def account_email_changed(\n self,\n user: \"User\",\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"account_email_changed\",\n default_value,\n user,\n )\n\n def account_set_password_requested(\n self,\n user: \"User\",\n channel_slug: str,\n token: str,\n redirect_url: str,\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"account_set_password_requested\",\n default_value,\n user,\n channel_slug,\n token=token,\n redirect_url=redirect_url,\n )\n\n def account_delete_requested(\n self, user: \"User\", channel_slug: str, token: str, redirect_url: str\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"account_delete_requested\",\n default_value,\n user,\n channel_slug,\n token=token,\n redirect_url=redirect_url,\n )\n\n def account_deleted(self, user: \"User\"):\n default_value = None\n return self.__run_method_on_plugins(\"account_deleted\", default_value, 
user)\n\n def address_created(self, address: \"Address\"):\n default_value = None\n return self.__run_method_on_plugins(\"address_created\", default_value, address)\n\n def address_updated(self, address: \"Address\"):\n default_value = None\n return self.__run_method_on_plugins(\"address_updated\", default_value, address)\n\n def address_deleted(self, address: \"Address\"):\n default_value = None\n return self.__run_method_on_plugins(\"address_deleted\", default_value, address)\n\n def app_installed(self, app: \"App\"):\n default_value = None\n return self.__run_method_on_plugins(\"app_installed\", default_value, app)\n\n def app_updated(self, app: \"App\"):\n default_value = None\n return self.__run_method_on_plugins(\"app_updated\", default_value, app)\n\n def app_deleted(self, app: \"App\"):\n default_value = None\n return self.__run_method_on_plugins(\"app_deleted\", default_value, app)\n\n def app_status_changed(self, app: \"App\"):\n default_value = None\n return self.__run_method_on_plugins(\"app_status_changed\", default_value, app)\n\n def attribute_created(self, attribute: \"Attribute\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"attribute_created\", default_value, attribute\n )\n\n def attribute_updated(self, attribute: \"Attribute\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"attribute_updated\", default_value, attribute, webhooks=webhooks\n )\n\n def attribute_deleted(self, attribute: \"Attribute\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"attribute_deleted\", default_value, attribute, webhooks=webhooks\n )\n\n def attribute_value_created(self, attribute_value: \"AttributeValue\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"attribute_value_created\", default_value, attribute_value, webhooks=webhooks\n )\n\n def attribute_value_updated(self, attribute_value: \"AttributeValue\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"attribute_value_updated\", default_value, attribute_value\n )\n\n def attribute_value_deleted(self, attribute_value: \"AttributeValue\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"attribute_value_deleted\", default_value, attribute_value, webhooks=webhooks\n )\n\n def category_created(self, category: \"Category\"):\n default_value = None\n return self.__run_method_on_plugins(\"category_created\", default_value, category)\n\n def category_updated(self, category: \"Category\"):\n default_value = None\n return self.__run_method_on_plugins(\"category_updated\", default_value, category)\n\n def category_deleted(self, category: \"Category\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"category_deleted\", default_value, category, webhooks=webhooks\n )\n\n def channel_created(self, channel: \"Channel\"):\n default_value = None\n return self.__run_method_on_plugins(\"channel_created\", default_value, channel)\n\n def channel_updated(self, channel: \"Channel\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"channel_updated\", default_value, channel, webhooks=webhooks\n )\n\n def channel_deleted(self, channel: \"Channel\"):\n default_value = None\n return self.__run_method_on_plugins(\"channel_deleted\", default_value, channel)\n\n def channel_status_changed(self, channel: \"Channel\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"channel_status_changed\", default_value, 
channel\n )\n\n def channel_metadata_updated(self, channel: \"Channel\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"channel_metadata_updated\", default_value, channel\n )\n\n def gift_card_created(self, gift_card: \"GiftCard\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_created\", default_value, gift_card, webhooks=webhooks\n )\n\n def gift_card_updated(self, gift_card: \"GiftCard\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_updated\", default_value, gift_card\n )\n\n def gift_card_deleted(self, gift_card: \"GiftCard\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_deleted\", default_value, gift_card, webhooks=webhooks\n )\n\n def gift_card_sent(self, gift_card: \"GiftCard\", channel_slug: str, email: str):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_sent\",\n default_value,\n gift_card,\n channel_slug,\n email,\n )\n\n def gift_card_status_changed(self, gift_card: \"GiftCard\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_status_changed\", default_value, gift_card, webhooks=webhooks\n )\n\n def gift_card_metadata_updated(self, gift_card: \"GiftCard\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_metadata_updated\", default_value, gift_card\n )\n\n def gift_card_export_completed(self, export: \"ExportFile\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_export_completed\", default_value, export\n )\n\n def menu_created(self, menu: \"Menu\"):\n default_value = None\n return self.__run_method_on_plugins(\"menu_created\", default_value, menu)\n\n def menu_updated(self, menu: \"Menu\"):\n default_value = None\n return self.__run_method_on_plugins(\"menu_updated\", default_value, menu)\n\n def menu_deleted(self, menu: \"Menu\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"menu_deleted\", default_value, menu, webhooks=webhooks\n )\n\n def menu_item_created(self, menu_item: \"MenuItem\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"menu_item_created\", default_value, menu_item\n )\n\n def menu_item_updated(self, menu_item: \"MenuItem\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"menu_item_updated\", default_value, menu_item\n )\n\n def menu_item_deleted(self, menu_item: \"MenuItem\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"menu_item_deleted\", default_value, menu_item, webhooks=webhooks\n )\n\n def shipping_price_created(self, shipping_method: \"ShippingMethod\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"shipping_price_created\", default_value, shipping_method\n )\n\n def shipping_price_updated(self, shipping_method: \"ShippingMethod\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"shipping_price_updated\", default_value, shipping_method\n )\n\n def shipping_price_deleted(self, shipping_method: \"ShippingMethod\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"shipping_price_deleted\", default_value, shipping_method, webhooks=webhooks\n )\n\n def shipping_zone_created(self, shipping_zone: \"ShippingZone\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"shipping_zone_created\", default_value, shipping_zone\n )\n\n def shipping_zone_updated(self, shipping_zone: \"ShippingZone\"):\n 
default_value = None\n return self.__run_method_on_plugins(\n \"shipping_zone_updated\", default_value, shipping_zone\n )\n\n def shipping_zone_deleted(self, shipping_zone: \"ShippingZone\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"shipping_zone_deleted\", default_value, shipping_zone, webhooks=webhooks\n )\n\n def shipping_zone_metadata_updated(self, shipping_zone: \"ShippingZone\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"shipping_zone_metadata_updated\", default_value, shipping_zone\n )\n\n def staff_created(self, staff_user: \"User\"):\n default_value = None\n return self.__run_method_on_plugins(\"staff_created\", default_value, staff_user)\n\n def staff_updated(self, staff_user: \"User\"):\n default_value = None\n return self.__run_method_on_plugins(\"staff_updated\", default_value, staff_user)\n\n def staff_deleted(self, staff_user: \"User\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"staff_deleted\", default_value, staff_user, webhooks=webhooks\n )\n\n def staff_set_password_requested(\n self, user: \"User\", channel_slug: str, token: str, redirect_url: str\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"staff_set_password_requested\",\n default_value,\n user,\n channel_slug,\n token=token,\n redirect_url=redirect_url,\n )\n\n def thumbnail_created(\n self,\n thumbnail: \"Thumbnail\",\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"thumbnail_created\", default_value, thumbnail\n )\n\n def warehouse_created(self, warehouse: \"Warehouse\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"warehouse_created\", default_value, warehouse\n )\n\n def warehouse_updated(self, warehouse: \"Warehouse\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"warehouse_updated\", default_value, warehouse\n )\n\n def warehouse_deleted(self, warehouse: \"Warehouse\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"warehouse_deleted\", default_value, warehouse\n )\n\n def warehouse_metadata_updated(self, warehouse: \"Warehouse\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"warehouse_metadata_updated\", default_value, warehouse\n )\n\n def voucher_created(self, voucher: \"Voucher\", code: str):\n default_value = None\n return self.__run_method_on_plugins(\n \"voucher_created\", default_value, voucher, code\n )\n\n def voucher_updated(self, voucher: \"Voucher\", code: str):\n default_value = None\n return self.__run_method_on_plugins(\n \"voucher_updated\", default_value, voucher, code\n )\n\n def voucher_deleted(self, voucher: \"Voucher\", code: str, webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"voucher_deleted\", default_value, voucher, code, webhooks=webhooks\n )\n\n def voucher_metadata_updated(self, voucher: \"Voucher\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"voucher_metadata_updated\", default_value, voucher\n )\n\n def voucher_code_export_completed(self, export: \"ExportFile\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"voucher_code_export_completed\", default_value, export\n )\n\n def shop_metadata_updated(self, shop: \"SiteSettings\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"shop_metadata_updated\", default_value, shop\n )\n\n def initialize_payment(\n self, gateway, payment_data: dict, channel_slug: str\n ) -> Optional[\"InitializedPaymentResponse\"]:\n 
method_name = \"initialize_payment\"\n default_value = None\n gtw = self.get_plugin(gateway, channel_slug)\n if not gtw:\n return None\n\n return self.__run_method_on_single_plugin(\n gtw,\n method_name,\n previous_value=default_value,\n payment_data=payment_data,\n )\n\n def authorize_payment(\n self, gateway: str, payment_information: \"PaymentData\", channel_slug: str\n ) -> \"GatewayResponse\":\n return self.__run_payment_method(\n gateway, \"authorize_payment\", payment_information, channel_slug=channel_slug\n )\n\n def capture_payment(\n self, gateway: str, payment_information: \"PaymentData\", channel_slug: str\n ) -> \"GatewayResponse\":\n return self.__run_payment_method(\n gateway, \"capture_payment\", payment_information, channel_slug=channel_slug\n )\n\n def refund_payment(\n self, gateway: str, payment_information: \"PaymentData\", channel_slug: str\n ) -> \"GatewayResponse\":\n return self.__run_payment_method(\n gateway, \"refund_payment\", payment_information, channel_slug=channel_slug\n )\n\n def void_payment(\n self, gateway: str, payment_information: \"PaymentData\", channel_slug: str\n ) -> \"GatewayResponse\":\n return self.__run_payment_method(\n gateway, \"void_payment\", payment_information, channel_slug=channel_slug\n )\n\n def confirm_payment(\n self, gateway: str, payment_information: \"PaymentData\", channel_slug: str\n ) -> \"GatewayResponse\":\n return self.__run_payment_method(\n gateway, \"confirm_payment\", payment_information, channel_slug=channel_slug\n )\n\n def process_payment(\n self, gateway: str, payment_information: \"PaymentData\", channel_slug: str\n ) -> \"GatewayResponse\":\n return self.__run_payment_method(\n gateway, \"process_payment\", payment_information, channel_slug=channel_slug\n )\n\n def token_is_required_as_payment_input(\n self, gateway: str, channel_slug: str\n ) -> bool:\n method_name = \"token_is_required_as_payment_input\"\n default_value = True\n gtw = self.get_plugin(gateway, channel_slug=channel_slug)\n if gtw is not None:\n return self.__run_method_on_single_plugin(\n gtw,\n method_name,\n previous_value=default_value,\n )\n return default_value\n\n def get_client_token(\n self,\n gateway,\n token_config: \"TokenConfig\",\n channel_slug: str,\n ) -> str:\n method_name = \"get_client_token\"\n default_value = None\n gtw = self.get_plugin(gateway, channel_slug=channel_slug)\n return self.__run_method_on_single_plugin(\n gtw, method_name, default_value, token_config=token_config\n )\n\n def list_payment_sources(\n self,\n gateway: str,\n customer_id: str,\n channel_slug: str,\n ) -> list[\"CustomerSource\"]:\n default_value: list = []\n gtw = self.get_plugin(gateway, channel_slug=channel_slug)\n if gtw is not None:\n return self.__run_method_on_single_plugin(\n gtw, \"list_payment_sources\", default_value, customer_id=customer_id\n )\n raise Exception(f\"Payment plugin {gateway} is inaccessible!\")\n\n def list_stored_payment_methods(\n self, list_stored_payment_methods_data: \"ListStoredPaymentMethodsRequestData\"\n ) -> list[\"PaymentMethodData\"]:\n default_value: list = []\n return self.__run_method_on_plugins(\n \"list_stored_payment_methods\",\n default_value,\n list_stored_payment_methods_data,\n )\n\n def stored_payment_method_request_delete(\n self,\n request_delete_data: \"StoredPaymentMethodRequestDeleteData\",\n ) -> \"StoredPaymentMethodRequestDeleteResponseData\":\n default_response = StoredPaymentMethodRequestDeleteResponseData(\n result=StoredPaymentMethodRequestDeleteResult.FAILED_TO_DELIVER,\n error=\"Payment 
method request delete failed to deliver.\",\n )\n response = self.__run_method_on_plugins(\n \"stored_payment_method_request_delete\",\n default_response,\n request_delete_data,\n )\n return response\n\n def payment_gateway_initialize_tokenization(\n self,\n request_data: \"PaymentGatewayInitializeTokenizationRequestData\",\n ) -> \"PaymentGatewayInitializeTokenizationResponseData\":\n default_response = PaymentGatewayInitializeTokenizationResponseData(\n result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER,\n error=\"Payment gateway initialize tokenization failed to deliver.\",\n data=None,\n )\n\n response = self.__run_method_on_plugins(\n \"payment_gateway_initialize_tokenization\",\n default_response,\n request_data,\n )\n return response\n\n def payment_method_initialize_tokenization(\n self,\n request_data: \"PaymentMethodProcessTokenizationRequestData\",\n ) -> \"PaymentMethodTokenizationResponseData\":\n default_response = PaymentMethodTokenizationResponseData(\n result=PaymentMethodTokenizationResult.FAILED_TO_DELIVER,\n error=\"Payment method initialize tokenization failed to deliver.\",\n data=None,\n )\n\n response = self.__run_method_on_plugins(\n \"payment_method_initialize_tokenization\",\n default_response,\n request_data,\n )\n return response\n\n def payment_method_process_tokenization(\n self,\n request_data: \"PaymentMethodProcessTokenizationRequestData\",\n ) -> \"PaymentMethodTokenizationResponseData\":\n default_response = PaymentMethodTokenizationResponseData(\n result=PaymentMethodTokenizationResult.FAILED_TO_DELIVER,\n error=\"Payment method process tokenization failed to deliver.\",\n data=None,\n )\n\n response = self.__run_method_on_plugins(\n \"payment_method_process_tokenization\",\n default_response,\n request_data,\n )\n return response\n\n def translation_created(self, translation: \"Translation\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"translation_created\", default_value, translation\n )\n\n def translation_updated(self, translation: \"Translation\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"translation_updated\", default_value, translation\n )\n\n def get_plugins(\n self, channel_slug: Optional[str] = None, active_only=False\n ) -> list[\"BasePlugin\"]:\n \"\"\"Return list of plugins for a given channel.\"\"\"\n if channel_slug:\n plugins = self.plugins_per_channel[channel_slug]\n else:\n plugins = self.all_plugins\n\n if active_only:\n plugins = [plugin for plugin in plugins if plugin.active]\n return plugins\n\n def list_payment_gateways(\n self,\n currency: Optional[str] = None,\n checkout_info: Optional[\"CheckoutInfo\"] = None,\n checkout_lines: Optional[Iterable[\"CheckoutLineInfo\"]] = None,\n channel_slug: Optional[str] = None,\n active_only: bool = True,\n ) -> list[\"PaymentGateway\"]:\n channel_slug = checkout_info.channel.slug if checkout_info else channel_slug\n plugins = self.get_plugins(channel_slug=channel_slug, active_only=active_only)\n payment_plugins = [\n plugin for plugin in plugins if \"process_payment\" in type(plugin).__dict__\n ]\n\n # if currency is given return only gateways which support given currency\n gateways = []\n for plugin in payment_plugins:\n gateways.extend(\n plugin.get_payment_gateways(\n currency=currency,\n checkout_info=checkout_info,\n checkout_lines=checkout_lines,\n previous_value=None,\n )\n )\n return gateways\n\n def list_shipping_methods_for_checkout(\n self,\n checkout: \"Checkout\",\n channel_slug: Optional[str] = None,\n 
active_only: bool = True,\n ) -> list[\"ShippingMethodData\"]:\n channel_slug = channel_slug if channel_slug else checkout.channel.slug\n plugins = self.get_plugins(channel_slug=channel_slug, active_only=active_only)\n shipping_plugins = [\n plugin\n for plugin in plugins\n if hasattr(plugin, \"get_shipping_methods_for_checkout\")\n ]\n\n shipping_methods = []\n for plugin in shipping_plugins:\n shipping_methods.extend(\n # https://github.com/python/mypy/issues/9975\n getattr(plugin, \"get_shipping_methods_for_checkout\")(checkout, None)\n )\n return shipping_methods\n\n def get_shipping_method(\n self,\n shipping_method_id: str,\n checkout: Optional[\"Checkout\"] = None,\n channel_slug: Optional[str] = None,\n ):\n if checkout:\n methods = {\n method.id: method\n for method in self.list_shipping_methods_for_checkout(\n checkout=checkout, channel_slug=channel_slug\n )\n }\n return methods.get(shipping_method_id)\n return None\n\n def list_external_authentications(self, active_only: bool = True) -> list[dict]:\n auth_basic_method = \"external_obtain_access_tokens\"\n plugins = self.get_plugins(active_only=active_only)\n return [\n {\"id\": plugin.PLUGIN_ID, \"name\": plugin.PLUGIN_NAME}\n for plugin in plugins\n if auth_basic_method in type(plugin).__dict__\n ]\n\n def __run_payment_method(\n self,\n gateway: str,\n method_name: str,\n payment_information: \"PaymentData\",\n channel_slug: str,\n **kwargs,\n ) -> \"GatewayResponse\":\n default_value = None\n plugin = self.get_plugin(gateway, channel_slug)\n if plugin is not None:\n resp = self.__run_method_on_single_plugin(\n plugin,\n method_name,\n previous_value=default_value,\n payment_information=payment_information,\n **kwargs,\n )\n if resp is not None:\n return resp\n\n raise Exception(\n f\"Payment plugin {gateway} for {method_name}\"\n \" payment method is inaccessible!\"\n )\n\n def __run_plugin_method_until_first_success(\n self,\n method_name: str,\n *args,\n channel_slug: Optional[str] = None,\n ):\n plugins = self.get_plugins(channel_slug=channel_slug)\n for plugin in plugins:\n result = self.__run_method_on_single_plugin(\n plugin, method_name, None, *args\n )\n if result is not None:\n return result\n return None\n\n def _get_all_plugin_configs(self):\n with opentracing.global_tracer().start_active_span(\"_get_all_plugin_configs\"):\n if not hasattr(self, \"_plugin_configs\"):\n plugin_configurations = PluginConfiguration.objects.prefetch_related(\n \"channel\"\n ).all()\n self._plugin_configs_per_channel: defaultdict[\n Channel, dict\n ] = defaultdict(dict)\n self._global_plugin_configs = {}\n for pc in plugin_configurations:\n channel = pc.channel\n if channel is None:\n self._global_plugin_configs[pc.identifier] = pc\n else:\n self._plugin_configs_per_channel[channel][pc.identifier] = pc\n return self._global_plugin_configs, self._plugin_configs_per_channel\n\n # FIXME these methods should be more generic\n\n def assign_tax_code_to_object_meta(self, obj: \"TaxClass\", tax_code: Optional[str]):\n default_value = None\n return self.__run_method_on_plugins(\n \"assign_tax_code_to_object_meta\", default_value, obj, tax_code\n )\n\n def get_tax_code_from_object_meta(\n self, obj: Union[\"Product\", \"ProductType\", \"TaxClass\"]\n ) -> TaxType:\n default_value = TaxType(code=\"\", description=\"\")\n return self.__run_method_on_plugins(\n \"get_tax_code_from_object_meta\", default_value, obj\n )\n\n def save_plugin_configuration(\n self, plugin_id, channel_slug: Optional[str], cleaned_data: dict\n ):\n if channel_slug:\n 
plugins = self.get_plugins(channel_slug=channel_slug)\n channel = (\n Channel.objects.using(self.database).filter(slug=channel_slug).first()\n )\n if not channel:\n return None\n else:\n channel = None\n plugins = self.global_plugins\n\n for plugin in plugins:\n if plugin.PLUGIN_ID == plugin_id:\n plugin_configuration, _ = PluginConfiguration.objects.using(\n self.database\n ).get_or_create(\n identifier=plugin_id,\n channel=channel,\n defaults={\"configuration\": plugin.configuration},\n )\n configuration = plugin.save_plugin_configuration(\n plugin_configuration, cleaned_data\n )\n configuration.name = plugin.PLUGIN_NAME\n configuration.description = plugin.PLUGIN_DESCRIPTION\n plugin.active = configuration.active\n plugin.configuration = configuration.configuration\n return configuration\n\n def get_plugin(\n self, plugin_id: str, channel_slug: Optional[str] = None\n ) -> Optional[\"BasePlugin\"]:\n plugins = self.get_plugins(channel_slug=channel_slug)\n for plugin in plugins:\n if plugin.check_plugin_id(plugin_id):\n return plugin\n return None\n\n def webhook_endpoint_without_channel(\n self, request: SaleorContext, plugin_id: str\n ) -> HttpResponse:\n # This should be removed in 3.0.0-a.25 as we want to give a possibility to have\n # no downtime between RCs\n split_path = request.path.split(plugin_id, maxsplit=1)\n path = None\n if len(split_path) == 2:\n path = split_path[1]\n\n default_value = HttpResponseNotFound()\n plugin = self.get_plugin(plugin_id)\n if not plugin:\n return default_value\n return self.__run_method_on_single_plugin(\n plugin, \"webhook\", default_value, request, path\n )\n\n def webhook(\n self, request: SaleorContext, plugin_id: str, channel_slug: Optional[str] = None\n ) -> HttpResponse:\n split_path = request.path.split(plugin_id, maxsplit=1)\n path = None\n if len(split_path) == 2:\n path = split_path[1]\n\n default_value = HttpResponseNotFound()\n plugin = self.get_plugin(plugin_id, channel_slug=channel_slug)\n if not plugin:\n return default_value\n\n if not plugin.active:\n return default_value\n\n if plugin.CONFIGURATION_PER_CHANNEL and not channel_slug:\n return HttpResponseNotFound(\n \"Incorrect endpoint. 
Use /plugins/channel/<channel_slug>/\"\n f\"{plugin.PLUGIN_ID}/\"\n )\n\n return self.__run_method_on_single_plugin(\n plugin, \"webhook\", default_value, request, path\n )\n\n def notify(\n self,\n event: \"NotifyEventTypeChoice\",\n payload: dict,\n channel_slug: Optional[str] = None,\n plugin_id: Optional[str] = None,\n ):\n default_value = None\n if plugin_id:\n plugin = self.get_plugin(plugin_id, channel_slug=channel_slug)\n return self.__run_method_on_single_plugin(\n plugin=plugin,\n method_name=\"notify\",\n previous_value=default_value,\n event=event,\n payload=payload,\n )\n return self.__run_method_on_plugins(\n \"notify\", default_value, event, payload, channel_slug=channel_slug\n )\n\n def external_obtain_access_tokens(\n self, plugin_id: str, data: dict, request: SaleorContext\n ) -> ExternalAccessTokens:\n \"\"\"Obtain access tokens from authentication plugin.\"\"\"\n default_value = ExternalAccessTokens()\n plugin = self.get_plugin(plugin_id)\n return self.__run_method_on_single_plugin(\n plugin, \"external_obtain_access_tokens\", default_value, data, request\n )\n\n def external_authentication_url(\n self, plugin_id: str, data: dict, request: SaleorContext\n ) -> dict:\n \"\"\"Handle authentication request.\"\"\"\n default_value = {} # type: ignore\n plugin = self.get_plugin(plugin_id)\n return self.__run_method_on_single_plugin(\n plugin, \"external_authentication_url\", default_value, data, request\n )\n\n def external_refresh(\n self, plugin_id: str, data: dict, request: SaleorContext\n ) -> ExternalAccessTokens:\n \"\"\"Handle authentication refresh request.\"\"\"\n default_value = ExternalAccessTokens()\n plugin = self.get_plugin(plugin_id)\n return self.__run_method_on_single_plugin(\n plugin, \"external_refresh\", default_value, data, request\n )\n\n def authenticate_user(self, request: SaleorContext) -> Optional[\"User\"]:\n \"\"\"Authenticate user which should be assigned to the request.\"\"\"\n default_value = None\n return self.__run_method_on_plugins(\"authenticate_user\", default_value, request)\n\n def external_logout(\n self, plugin_id: str, data: dict, request: SaleorContext\n ) -> dict:\n \"\"\"Logout the user.\"\"\"\n default_value: dict[str, str] = {}\n plugin = self.get_plugin(plugin_id)\n return self.__run_method_on_single_plugin(\n plugin, \"external_logout\", default_value, data, request\n )\n\n def external_verify(\n self, plugin_id: str, data: dict, request: SaleorContext\n ) -> tuple[Optional[\"User\"], dict]:\n \"\"\"Verify the provided authentication data.\"\"\"\n default_data: dict[str, str] = dict()\n default_user: Optional[\"User\"] = None\n default_value = default_user, default_data\n plugin = self.get_plugin(plugin_id)\n return self.__run_method_on_single_plugin(\n plugin, \"external_verify\", default_value, data, request\n )\n\n def excluded_shipping_methods_for_order(\n self,\n order: \"Order\",\n available_shipping_methods: list[\"ShippingMethodData\"],\n ) -> list[ExcludedShippingMethod]:\n return self.__run_method_on_plugins(\n \"excluded_shipping_methods_for_order\",\n [],\n order,\n available_shipping_methods,\n channel_slug=order.channel.slug,\n )\n\n def excluded_shipping_methods_for_checkout(\n self,\n checkout: \"Checkout\",\n available_shipping_methods: list[\"ShippingMethodData\"],\n ) -> list[ExcludedShippingMethod]:\n return self.__run_method_on_plugins(\n \"excluded_shipping_methods_for_checkout\",\n [],\n checkout,\n available_shipping_methods,\n channel_slug=checkout.channel.slug,\n )\n\n def perform_mutation(\n self, 
mutation_cls: Mutation, root, info: ResolveInfo, data: dict\n ) -> Optional[Union[ExecutionResult, GraphQLError]]:\n \"\"\"Invoke before each mutation is executed.\n\n This allows to trigger specific logic before the mutation is executed\n but only once the permissions are checked.\n\n Returns one of:\n - null if the execution shall continue\n - graphql.GraphQLError\n - graphql.execution.ExecutionResult\n \"\"\"\n return self.__run_method_on_plugins(\n \"perform_mutation\",\n default_value=None,\n mutation_cls=mutation_cls,\n root=root,\n info=info,\n data=data,\n )\n\n def is_event_active_for_any_plugin(\n self, event: str, channel_slug: Optional[str] = None\n ) -> bool:\n \"\"\"Check if any plugin supports defined event.\"\"\"\n plugins = (\n self.plugins_per_channel[channel_slug] if channel_slug else self.all_plugins\n )\n only_active_plugins = [plugin for plugin in plugins if plugin.active]\n return any([plugin.is_event_active(event) for plugin in only_active_plugins])\n\n def _get_channel_map(self):\n return {\n channel.pk: channel\n for channel in Channel.objects.using(self.database).all().iterator()\n }" }, { "identifier": "PaymentMethodProcessTokenizationErrorCode", "path": "saleor/graphql/core/enums.py", "snippet": "class OrderDirection(graphene.Enum):\nclass ReportingPeriod(graphene.Enum):\nclass ErrorPolicy:\n ASC = \"\"\n DESC = \"-\"\n TODAY = \"TODAY\"\n THIS_MONTH = \"THIS_MONTH\"\n IGNORE_FAILED = \"ignore_failed\"\n REJECT_EVERYTHING = \"reject_everything\"\n REJECT_FAILED_ROWS = \"reject_failed_rows\"\n CHOICES = [\n (IGNORE_FAILED, \"Ignore failed\"),\n (REJECT_EVERYTHING, \"Reject everything\"),\n (REJECT_FAILED_ROWS, \"Reject failed rows\"),\n ]\n def description(self):\ndef to_enum(enum_cls, *, type_name=None, **options) -> graphene.Enum:\ndef error_policy_enum_description(enum):" }, { "identifier": "assert_no_permission", "path": "saleor/graphql/tests/utils.py", "snippet": "def assert_no_permission(response):\n content = get_graphql_content_from_response(response)\n assert \"errors\" in content, content\n assert content[\"errors\"][0][\"extensions\"][\"exception\"][\"code\"] == (\n \"PermissionDenied\"\n ), content[\"errors\"]" }, { "identifier": "get_graphql_content", "path": "saleor/graphql/tests/utils.py", "snippet": "def get_graphql_content(response, *, ignore_errors: bool = False):\n \"\"\"Extract GraphQL content from the API response.\n\n Optionally ignore protocol-level errors, eg. schema errors or lack of\n permissions.\n \"\"\"\n content = get_graphql_content_from_response(response)\n if not ignore_errors:\n assert \"errors\" not in content, content[\"errors\"]\n return content" }, { "identifier": "PaymentMethodTokenizationResultEnum", "path": "saleor/graphql/payment/enums.py", "snippet": "class OrderAction(BaseEnum):\n class Meta:\n CAPTURE = \"CAPTURE\"\n MARK_AS_PAID = \"MARK_AS_PAID\"\n REFUND = \"REFUND\"\n VOID = \"VOID\"\n def description(self):\ndef description(enum):" } ]
from unittest.mock import patch from .....payment.interface import ( PaymentMethodProcessTokenizationRequestData, PaymentMethodTokenizationResponseData, PaymentMethodTokenizationResult, ) from .....plugins.manager import PluginsManager from ....core.enums import PaymentMethodProcessTokenizationErrorCode from ....tests.utils import assert_no_permission, get_graphql_content from ...enums import PaymentMethodTokenizationResultEnum import pytest
17575
PAYMENT_METHOD_PROCESS_TOKENIZATION = """ mutation PaymentMethodProcessTokenization( $id: String!, $channel: String!, $data: JSON){ paymentMethodProcessTokenization(id: $id, channel: $channel, data: $data){ result data id errors{ field code message } } } """ @pytest.mark.parametrize( ("expected_input_data", "expected_output_data"), [ (None, None), (None, {"foo": "bar1"}), ({"foo": "bar2"}, None), ({"foo": "bar3"}, {"foo": "bar4"}), ], ) @patch.object(PluginsManager, "payment_method_process_tokenization") @patch.object(PluginsManager, "is_event_active_for_any_plugin") def test_payment_method_process_tokenization( mocked_is_event_active_for_any_plugin, mocked_payment_method_process_tokenization, expected_input_data, expected_output_data, user_api_client, channel_USD, app, ): # given expected_payment_method_id = "test_id" mocked_is_event_active_for_any_plugin.return_value = True mocked_payment_method_process_tokenization.return_value = ( PaymentMethodTokenizationResponseData( result=PaymentMethodTokenizationResult.SUCCESSFULLY_TOKENIZED, id=expected_payment_method_id, error=None, data=expected_output_data, ) ) expected_id = "test_id" # when response = user_api_client.post_graphql( PAYMENT_METHOD_PROCESS_TOKENIZATION, variables={ "id": expected_id, "channel": channel_USD.slug, "data": expected_input_data, }, ) # then
PAYMENT_METHOD_PROCESS_TOKENIZATION = """ mutation PaymentMethodProcessTokenization( $id: String!, $channel: String!, $data: JSON){ paymentMethodProcessTokenization(id: $id, channel: $channel, data: $data){ result data id errors{ field code message } } } """ @pytest.mark.parametrize( ("expected_input_data", "expected_output_data"), [ (None, None), (None, {"foo": "bar1"}), ({"foo": "bar2"}, None), ({"foo": "bar3"}, {"foo": "bar4"}), ], ) @patch.object(PluginsManager, "payment_method_process_tokenization") @patch.object(PluginsManager, "is_event_active_for_any_plugin") def test_payment_method_process_tokenization( mocked_is_event_active_for_any_plugin, mocked_payment_method_process_tokenization, expected_input_data, expected_output_data, user_api_client, channel_USD, app, ): # given expected_payment_method_id = "test_id" mocked_is_event_active_for_any_plugin.return_value = True mocked_payment_method_process_tokenization.return_value = ( PaymentMethodTokenizationResponseData( result=PaymentMethodTokenizationResult.SUCCESSFULLY_TOKENIZED, id=expected_payment_method_id, error=None, data=expected_output_data, ) ) expected_id = "test_id" # when response = user_api_client.post_graphql( PAYMENT_METHOD_PROCESS_TOKENIZATION, variables={ "id": expected_id, "channel": channel_USD.slug, "data": expected_input_data, }, ) # then
content = get_graphql_content(response)
6
2023-11-13 05:00:35+00:00
24k
kampta/asic
prepare_data.py
[ { "identifier": "CUBDataset", "path": "datasets/cub.py", "snippet": "class CUBDataset(Dataset):\n def __init__(self, data_dir, split='test', img_size=256, cls_idx=1,\n flow_dir=None, num_parts=0,\n mask_threshold=1, use_coseg_masks=False, padding_mode='border'):\n super().__init__()\n self.img_size = img_size\n self.split = split\n self.cls_idx = cls_idx\n self.flow_dir = flow_dir\n self.num_parts = num_parts\n self.mask_threshold = mask_threshold\n self.fixed_pairs = None\n self.thresholds = None\n self.border = True if padding_mode=='border' else False\n\n os.makedirs(data_dir, exist_ok=True)\n download_cub(data_dir)\n download_cub_metadata(data_dir)\n\n self.files, self.bboxes, self.kps, self.masks = load_acsm_data(\n data_dir, size=img_size, split=split, cls_idx=cls_idx)\n\n imgs = []\n for i in range(len(self.files)):\n img = Image.open(self.files[i]).convert('RGB')\n img = cub_crop(img, self.img_size, self.bboxes[i], border=self.border)\n imgs.append(torch.from_numpy(np.array(img)).permute(2, 0, 1))\n self.imgs = torch.stack(imgs) / 127.5 - 1.0 # normalize (-1, 1)\n\n # Load masks\n if flow_dir is not None:\n if use_coseg_masks:\n mask_dir = Path(flow_dir) / 'masks_coseg'\n else:\n mask_dir = Path(flow_dir) / 'masks'\n assert mask_dir.exists(), f\"{mask_dir} doesn't exist\"\n masks = []\n for i in range(0, len(self)):\n fname = mask_dir / f'{Path(self.files[i]).stem}.png'\n mask = np.array(Image.open(fname).convert('L'))\n masks.append(mask)\n self.masks = torch.from_numpy(np.stack(masks) > mask_threshold).float()\n\n self.parts = None\n if flow_dir is not None:\n parts_str = 'parts' if num_parts <=0 else f'parts_num{num_parts}'\n parts_dir = Path(flow_dir) / f'{parts_str}'\n if parts_dir.exists():\n parts = []\n for i in range(0, len(self)):\n fname = parts_dir / f'parts_s2_{Path(self.files[i]).stem}.npy'\n part = np.load(fname)\n parts.append(part)\n parts = np.stack(parts)\n num_parts = int(np.max(parts[~np.isnan(parts)])) + 1\n parts[np.isnan(parts)] = num_parts\n\n self.parts = torch.from_numpy(parts.astype(np.int64))\n else:\n print(f\"{parts_dir} doesn't exist. Parts won't load.\")\n self.num_parts = num_parts\n # self.parts = F.one_hot(parts, num_classes=num_parts+1).bool()\n\n # Load pseudo keypoints\n self.pseudo_kps = None\n if flow_dir is not None:\n nbb_dir = Path(flow_dir) / 'nbb'\n if nbb_dir.exists():\n self.pseudo_kps = load_nbb(nbb_dir, self.files, self.parts)\n max_matches = self.pseudo_kps.shape[2]\n print(f'Max #matches between an image pair: {max_matches}')\n else:\n print(f\"{nbb_dir} doesn't exist. 
Pseudo kps won't load.\")\n\n\n def __len__(self):\n return len(self.files)" }, { "identifier": "SpairDataset", "path": "datasets/spair.py", "snippet": "class SpairDataset(Dataset):\n def __init__(self, data_dir, split='test', img_size=256, spair_cat='cat',\n flow_dir=None, padding_mode='edge', num_parts=0,\n mask_threshold=1, use_coseg_masks=False):\n super().__init__()\n self.img_size = img_size\n self.split = split\n self.cat = spair_cat\n self.padding_mode = padding_mode\n self.flow_dir = flow_dir\n self.num_parts = num_parts\n self.mask_threshold = mask_threshold\n\n normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5])\n transform = transforms.Compose([\n SquarePad(padding_mode),\n transforms.Resize(img_size),\n transforms.ToTensor(),\n normalize,\n ])\n\n os.makedirs(data_dir, exist_ok=True)\n spair_dir = download_spair(data_dir)\n\n self.files, self.kps, fixed_pairs, thresholds = load_spair_data(\n spair_dir, size=img_size, split=split, category=spair_cat)\n imgs = [transform(Image.open(self.files[i]).convert('RGB'))\n for i in range(len(self))]\n self.imgs = torch.stack(imgs)\n self.fixed_pairs = np.array(fixed_pairs)\n self.thresholds = np.array(thresholds)\n\n self.masks = torch.ones(len(self), 1, img_size, img_size)\n self.pseudo_kps = None\n self.parts = None\n\n # Load masks\n if flow_dir is not None:\n if use_coseg_masks:\n mask_dir = Path(flow_dir) / 'masks_coseg'\n else:\n mask_dir = Path(flow_dir) / 'masks'\n assert mask_dir.exists(), f\"{mask_dir} doesn't exist\"\n masks = []\n for i in range(0, len(self)):\n fname = mask_dir / f'{Path(self.files[i]).stem}.png'\n mask = np.array(Image.open(fname).convert('L'))\n masks.append(mask)\n self.masks = torch.from_numpy(np.stack(masks) >= mask_threshold).float()\n\n # Load parts\n if flow_dir is not None:\n parts_str = 'parts' if num_parts <=0 else f'parts_num{num_parts}'\n parts_dir = Path(flow_dir) / f'{parts_str}'\n if parts_dir.exists():\n parts = []\n for i in range(0, len(self)):\n fname = parts_dir / f'parts_s2_{Path(self.files[i]).stem}.npy'\n part = np.load(fname)\n parts.append(part)\n parts = np.stack(parts)\n num_parts = int(np.max(parts[~np.isnan(parts)])) + 1\n parts[np.isnan(parts)] = num_parts\n\n self.parts = torch.from_numpy(parts.astype(np.int64))\n else:\n print(f\"{parts_dir} doesn't exist. Parts won't load.\")\n self.num_parts = num_parts\n # self.parts = F.one_hot(parts, num_classes=num_parts+1).bool()\n \n # Load pseudo keypoints\n if flow_dir is not None:\n nbb_dir = Path(flow_dir) / 'nbb'\n if nbb_dir.exists():\n self.pseudo_kps = load_nbb(nbb_dir, self.files, self.parts)\n max_matches = self.pseudo_kps.shape[2]\n print(f'Max #matches between an image pair: {max_matches}')\n else:\n print(f\"{nbb_dir} doesn't exist. 
Pseudo kps won't load.\")\n\n def __len__(self):\n return len(self.files)" }, { "identifier": "sample_from_reverse_flow", "path": "models/utils.py", "snippet": "def sample_from_reverse_flow(flow, points):\n # Points are of size B x N x 2 in YX format\n B, N, _ = points.shape\n\n # Reshape flow from (B, H, W, 2) to (B, H, W, 1, 1, 2)\n flow_reshaped = flow.unsqueeze(-2).unsqueeze(-2)\n\n # Reshape points from (B, N, 2) to (B, 1, 1, N, 2, 1)\n points = points.unsqueeze(1).unsqueeze(1).unsqueeze(-1)\n\n # (B, H, W, N)\n similarities = (flow_reshaped @ points)[..., 0, 0]\n distances = points.pow(2).squeeze(-1).sum(dim=-1) + \\\n flow_reshaped.pow(2).sum(dim=-1).squeeze(-1) - 2 * similarities\n\n nearest_neighbors = distances.reshape(\n B, flow_reshaped.size(1) * flow_reshaped.size(2), N).argmin(dim=1)\n points_transfered = unravel_index(\n nearest_neighbors, (flow_reshaped.size(1), flow_reshaped.size(2)))\n return points_transfered" }, { "identifier": "str2bool", "path": "commons/utils.py", "snippet": "def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')" }, { "identifier": "draw_kps", "path": "commons/draw.py", "snippet": "def draw_kps(src_img, trg_img, src_kps, trg_kps, kps_colors=None, lines=True):\n # Expects kps in (x, y) order to make it compatible with splat_points\n if type(src_img) is torch.Tensor:\n src_img = (src_img.permute(1, 2, 0) + 1)*127.5\n src_img = Image.fromarray(src_img.cpu().numpy().astype(np.uint8))\n else:\n src_img = src_img.copy()\n if type(trg_img) is torch.Tensor:\n trg_img = (trg_img.permute(1, 2, 0) + 1)*127.5\n trg_img = Image.fromarray(trg_img.cpu().numpy().astype(np.uint8))\n else:\n trg_img = trg_img.copy()\n\n if type(src_kps) is torch.Tensor:\n src_kps = src_kps.cpu().numpy()\n\n if type(trg_kps) is torch.Tensor:\n trg_kps = trg_kps.cpu().numpy()\n\n if kps_colors is None:\n # kps_colors = ['black'] * len(src_kps)\n # kps_colors = np.array(sns.color_palette(n_colors=len(src_kps)))\n kps_colors = get_colors(len(src_kps))\n kps_colors = (kps_colors * 255).astype(np.uint8)\n kps_colors = [tuple(col) for col in kps_colors]\n elif type(kps_colors) is torch.Tensor:\n kps_colors = (kps_colors * 255).cpu().numpy().astype(np.uint8)\n kps_colors = [tuple(col) for col in kps_colors]\n\n src_imsize = src_img.size\n trg_imsize = trg_img.size\n\n assert len(src_kps) == len(trg_kps), \\\n 'The number of matching key-points NOT same'\n\n src_draw = ImageDraw.Draw(src_img)\n trg_draw = ImageDraw.Draw(trg_img)\n\n kps_radius = 4 # if lines else 1.5\n\n for kp_id, (src_kp, trg_kp) in enumerate(zip(src_kps, trg_kps)): \n src_draw.ellipse((src_kp[0] - kps_radius, src_kp[1] - kps_radius,\n src_kp[0] + kps_radius, src_kp[1] + kps_radius),\n fill=kps_colors[kp_id], outline='white')\n trg_draw.ellipse((trg_kp[0] - kps_radius, trg_kp[1] - kps_radius,\n trg_kp[0] + kps_radius, trg_kp[1] + kps_radius),\n fill=kps_colors[kp_id], outline='white')\n\n total_width = src_imsize[0] + trg_imsize[0]\n total_height = max(src_imsize[1], trg_imsize[1])\n des_img = Image.new(\"RGB\", (total_width, total_height), color='black')\n\n new_im = Image.new('RGB', (total_width, total_height))\n new_im.paste(src_img, (0, 0))\n new_im.paste(trg_img, (src_imsize[0], 0))\n new_im.paste(des_img, (0, max(src_imsize[1], trg_imsize[1])))\n new_im_draw = ImageDraw.Draw(new_im)\n\n if lines:\n for kp_id, (src_kp, 
trg_kp) in enumerate(zip(src_kps, trg_kps)):\n new_im_draw.line(\n (src_kp[0], src_kp[1], trg_kp[0] + src_imsize[1], trg_kp[1]),\n fill=kps_colors[int(kp_id)], width=2)\n return new_im" }, { "identifier": "get_dense_colors", "path": "commons/draw.py", "snippet": "def get_dense_colors(points, resolution=256):\n colors = color_wheel_fast_smooth(resolution)\n if len(points.shape) == 2:\n return colors[points[:, 0], points[:, 1]]\n else:\n device = points.device\n N = len(points)\n colors = colors.permute(2, 0, 1).unsqueeze(0).expand(N, -1, -1, -1)\n points = map_minmax(points, 0, resolution-1, -1, 1).unsqueeze(-2)\n colors = F.grid_sample(colors.to(device), points, align_corners=False)\n return colors.squeeze(-1).permute(0, 2, 1)" }, { "identifier": "splat_points", "path": "commons/draw.py", "snippet": "@torch.inference_mode()\ndef splat_points(images, points, sigma, opacity, colorscale='turbo',\n colors=None, alpha_channel=None, blend_alg='alpha'):\n \"\"\"\n Highly efficient GPU-based splatting algorithm. This function is a wrapper\n for Splat2D to overlay points on images. For highest performance, use the\n colors argument directly instead of colorscale.\n images: (N, C, H, W) tensor in [-1, +1]\n points: (N, P, 2) tensor with values in [0, resolution - 1]\n (can be sub-pixel/non-integer coordinates)\n Can also be (N, K, P, 2) tensor, in which case points[:, i]\n gets a unique colorscale\n Expects points in (x, y) order.\n sigma: either float or (N,) tensor with values > 0\n controls the size of the splatted points\n opacity: float in [0, 1], controls the opacity of the splatted points\n colorscale: [Optional] str (or length-K list of str if points is size\n (N, K, P, 2)) indicating the Plotly colorscale to visualize\n points with\n colors: [Optional] (N, P, 3) tensor (or (N, K*P, 3)). If specified,\n colorscale will be ignored. Computing the colorscale\n often takes several orders of magnitude longer than the GPU-based\n splatting, so pre-computing the colors and passing them here\n instead of using the colorscale argument can provide a significant\n speed-up.\n alpha_channel: [Optional] (N, P, 1) tensor (or (N, K*P, 1)). If specified,\n colors will be blended into the output image based on the\n opacity values in alpha_channel (between 0 and 1).\n blend_alg: [Optiona] str. Specifies the blending algorithm to use when\n merging points into images. 
Can use alpha compositing ('alpha'),\n Laplacian Pyramid Blending ('laplacian') or a more conservative\n version of Laplacian Blending ('laplacian_light')\n :return (N, C, H, W) tensor in [-1, +1] with points splatted onto images\n \"\"\"\n assert images.dim() == 4 # (N, C, H, W)\n assert points.dim() == 3 or points.dim() == 4 # (N, P, 2) or (N, K, P, 2)\n batch_size = images.size(0)\n # each index in the second dimension gets a unique colorscale\n if points.dim() == 4:\n num_points = points.size(2)\n points = points.reshape(\n points.size(0), points.size(1) * points.size(2), 2) # (N, K*P, 2)\n if colors is None:\n if isinstance(colorscale, str):\n colorscale = [colorscale]\n assert len(colorscale) == points.size(1)\n # (1, K*P, 3)\n colors = torch.cat([\n get_plotly_colors(num_points, c) for c in colorscale], 1)\n colors = colors.repeat(batch_size, 1, 1) # (N, K*P, 3)\n elif colors is None:\n num_points = points.size(1)\n # All batch elements use the same colorscale\n if isinstance(colorscale, str):\n # (N, P, 3)\n colors = get_plotly_colors(\n points.size(1), colorscale).repeat(batch_size, 1, 1)\n else: # Each batch element uses its own colorscale\n assert len(colorscale) == batch_size\n colors = torch.cat([get_plotly_colors(num_points, c)\n for c in colorscale], 0)\n if alpha_channel is None:\n alpha_channel = torch.ones(\n batch_size, points.size(1), 1, device='cuda')\n if isinstance(sigma, (float, int)):\n sigma = torch.tensor(\n sigma, device='cuda', dtype=torch.float).view(1).repeat(batch_size)\n blank_img = torch.zeros(batch_size, images.size(1), images.size(2),\n images.size(3), device='cuda')\n blank_mask = torch.zeros(batch_size, 1, images.size(2), images.size(3),\n device='cuda')\n # (N, C, H, W)\n prop_obj_img = splat2d(blank_img, points, colors, sigma, False)\n # (N, 1, H, W)\n prop_mask_img = splat2d(blank_mask, points, alpha_channel, sigma, True)\n prop_mask_img *= opacity\n if blend_alg == 'alpha':\n # basic alpha-composite\n out = prop_mask_img * prop_obj_img + (1 - prop_mask_img) * images\n elif blend_alg == 'laplacian':\n blender = LaplacianBlender().to(images.device)\n out = blender(images, prop_obj_img, prop_mask_img)\n elif blend_alg == 'laplacian_light':\n blender = LaplacianBlender(levels=3, gaussian_kernel_size=11,\n gaussian_sigma=0.5).to(images.device)\n out = blender(images, prop_obj_img, prop_mask_img)\n return out" }, { "identifier": "load_fg_points", "path": "commons/draw.py", "snippet": "def load_fg_points(img_mask, resolution=None, normalize=False, device='cuda'):\n # returns points in XY format\n if resolution is None:\n resolution = img_mask.size(-1)\n us = vs = torch.arange(resolution)\n us, vs = torch.meshgrid(us, vs, indexing='xy')\n points = torch.stack([us.reshape(-1), vs.reshape(-1)]).permute(1, 0)\n points = points.unsqueeze(0).expand(img_mask.size(0), -1, -1)\n points = points.to(device)\n\n img_mask = img_mask.float()\n if len(img_mask.shape) == 3:\n img_mask = img_mask.unsqueeze(1)\n scale_factor = resolution / img_mask.size(2)\n if resolution != img_mask.size(2): # resize the mask:\n img_mask = F.interpolate(img_mask, scale_factor=scale_factor,\n mode='bilinear')\n\n img_mask = img_mask.squeeze(1)\n points_alpha = img_mask.reshape(img_mask.size(0), -1)\n points = points / (resolution-1)\n if not normalize:\n points *= (img_mask.size(2)/scale_factor-1)\n\n colors = color_wheel_fast_smooth(resolution).to(device)\n colors = colors.reshape(1, -1, 3).expand(img_mask.size(0), -1, -1)\n\n return points, points_alpha, colors" }, { "identifier": 
"mls_rigid_deformation", "path": "thirdparty/MLS/mls.py", "snippet": "def mls_rigid_deformation(p, q, alpha=1.0, eps=1e-8, resolution=256):\n \"\"\" Rigid deformation\n\n Parameters\n ----------\n vx, vy: torch.Tensor\n coordinate grid, generated by torch.meshgrid(gridX, gridY)\n p: torch.Tensor\n an array with size [n, 2], original control points, in (y, x) formats\n q: torch.Tensor\n an array with size [n, 2], final control points, in (y, x) formats\n alpha: float\n parameter used by weights\n eps: float\n epsilon\n\n Return\n ------\n A deformed image.\n \"\"\"\n device = q.device\n gridY = torch.arange(resolution, device=device)\n gridX = torch.arange(resolution, device=device)\n vx, vy = torch.meshgrid(gridX, gridY, indexing='xy')\n\n q = q.short()\n p = p.short()\n\n # Exchange p and q and hence we transform destination pixels to the\n # corresponding source pixels.\n p, q = q, p\n\n grow = vx.shape[0] # grid rows\n gcol = vx.shape[1] # grid cols\n ctrls = p.shape[0] # control points\n\n # Compute\n reshaped_p = p.reshape(ctrls, 2, 1, 1) # [ctrls, 2, 1, 1]\n # [2, grow, gcol]\n reshaped_v = torch.cat((vx.reshape(1, grow, gcol),\n vy.reshape(1, grow, gcol)), dim=0)\n # [ctrls, grow, gcol]\n w = 1.0 / (torch.sum((reshaped_p - reshaped_v).float() ** 2, dim=1)\n + eps) ** alpha\n # [ctrls, grow, gcol]\n w /= torch.sum(w, dim=0, keepdim=True)\n\n pstar = torch.zeros((2, grow, gcol), dtype=torch.float32).to(device)\n for i in range(ctrls):\n # [2, grow, gcol]\n pstar += w[i] * reshaped_p[i]\n\n # [2, grow, gcol]\n vpstar = reshaped_v - pstar\n # [2, 1, grow, gcol]\n reshaped_vpstar = vpstar.reshape(2, 1, grow, gcol)\n # [2, grow, gcol]\n neg_vpstar_verti = vpstar[[1, 0], ...]\n neg_vpstar_verti[1, ...] = -neg_vpstar_verti[1, ...]\n # [2, 1, grow, gcol]\n reshaped_neg_vpstar_verti = neg_vpstar_verti.reshape(2, 1, grow, gcol)\n # [2, 2, grow, gcol]\n mul_right = torch.cat((reshaped_vpstar, reshaped_neg_vpstar_verti), dim=1)\n # [2, 2, grow, gcol]\n reshaped_mul_right = mul_right.reshape(2, 2, grow, gcol)\n\n # Calculate q\n # [ctrls, 2, 1, 1]\n reshaped_q = q.reshape((ctrls, 2, 1, 1))\n qstar = torch.zeros((2, grow, gcol), dtype=torch.float32).to(device)\n for i in range(ctrls):\n # [2, grow, gcol]\n qstar += w[i] * reshaped_q[i]\n\n temp = torch.zeros((grow, gcol, 2), dtype=torch.float32).to(device)\n for i in range(ctrls):\n # [2, grow, gcol]\n phat = reshaped_p[i] - pstar\n # [1, 2, grow, gcol]\n reshaped_phat = phat.reshape(1, 2, grow, gcol)\n # [1, 1, grow, gcol]\n reshaped_w = w[i].reshape(1, 1, grow, gcol)\n # [2, grow, gcol]\n neg_phat_verti = phat[[1, 0]]\n neg_phat_verti[1] = -neg_phat_verti[1]\n # [1, 2, grow, gcol]\n reshaped_neg_phat_verti = neg_phat_verti.reshape(1, 2, grow, gcol)\n # [2, 2, grow, gcol]\n mul_left = torch.cat((reshaped_phat, reshaped_neg_phat_verti), dim=0)\n\n # [grow, gcol, 2, 2]\n A = torch.matmul((reshaped_w * mul_left).permute(2, 3, 0, 1),\n reshaped_mul_right.permute(2, 3, 0, 1))\n\n # [2, grow, gcol]\n qhat = reshaped_q[i] - qstar\n # [grow, gcol, 1, 2]\n reshaped_qhat = qhat.reshape(1, 2, grow, gcol).permute(2, 3, 0, 1)\n\n # Get final image transfomer -- 3-D array\n # [grow, gcol, 2]\n temp += torch.matmul(reshaped_qhat, A).reshape(grow, gcol, 2)\n\n # [2, grow, gcol]\n temp = temp.permute(2, 0, 1)\n # [1, grow, gcol]\n normed_temp = torch.norm(temp, dim=0, keepdim=True)\n # [1, grow, gcol]\n normed_vpstar = torch.norm(vpstar, dim=0, keepdim=True)\n # [2, grow, gcol]\n transformers = temp / normed_temp * normed_vpstar + qstar\n nan_mask = 
normed_temp[0] == 0\n\n # Replace nan values by interpolated values\n nan_mask_flat = torch.nonzero(nan_mask.view(-1), as_tuple=True)[0]\n nan_mask_anti_flat = torch.nonzero(~nan_mask.view(-1), as_tuple=True)[0]\n transformers[0][nan_mask] = interp(nan_mask_flat, nan_mask_anti_flat,\n transformers[0][~nan_mask])\n transformers[1][nan_mask] = interp(nan_mask_flat, nan_mask_anti_flat,\n transformers[1][~nan_mask])\n\n # Remove the points outside the border\n transformers[transformers < 0] = 0\n transformers[0][transformers[0] > grow - 1] = 0\n transformers[1][transformers[1] > gcol - 1] = 0\n\n return transformers.long()" }, { "identifier": "map_minmax", "path": "commons/utils.py", "snippet": "def map_minmax(x, in_min, in_max, out_min, out_max):\n return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min" }, { "identifier": "coseg_from_feat", "path": "thirdparty/dino_vit_features/cosegmentation.py", "snippet": "def coseg_from_feat(\n images_list, descriptors_list, saliency_maps_list, img_size, num_patches,\n elbow: float = 0.975, thresh: float = 0.065, votes_percentage: int = 75,\n sample_interval: int = 100):\n\n num_images = len(images_list)\n print(\"Cluster all images using k-means\")\n all_descriptors = np.ascontiguousarray(np.concatenate(descriptors_list)).copy()\n # normalized_all_descriptors = all_descriptors.astype(np.float32)\n faiss.normalize_L2(all_descriptors) # in-place operation\n sampled_descriptors_list = [x[::sample_interval] for x in descriptors_list]\n all_sampled_descriptors = np.ascontiguousarray(np.concatenate(sampled_descriptors_list)).copy()\n # normalized_all_sampled_descriptors = all_sampled_descriptors.astype(np.float32)\n faiss.normalize_L2(all_sampled_descriptors) # in-place operation\n\n sum_of_squared_dists = []\n n_cluster_range = list(range(1, 15))\n for n_clusters in n_cluster_range:\n algorithm = faiss.Kmeans(d=all_sampled_descriptors.shape[1], k=n_clusters, niter=300, nredo=10)\n algorithm.train(all_sampled_descriptors)\n squared_distances, labels = algorithm.index.search(all_descriptors, 1)\n objective = squared_distances.sum()\n sum_of_squared_dists.append(objective / all_descriptors.shape[0])\n if (len(sum_of_squared_dists) > 1 and sum_of_squared_dists[-1] > elbow * sum_of_squared_dists[-2]):\n break\n\n num_labels = np.max(n_clusters) + 1\n num_descriptors_per_image = [num_patches * num_patches] * num_images\n labels_per_image = np.split(labels, np.cumsum(num_descriptors_per_image))\n\n print(\"Use saliency maps to vote for salient clusters\")\n votes = np.zeros(num_labels)\n for image_labels, saliency_map in zip(labels_per_image, saliency_maps_list):\n for label in range(num_labels):\n label_saliency = saliency_map[image_labels[:, 0] == label].mean()\n if label_saliency > thresh:\n votes[label] += 1\n salient_labels = np.where(votes >= np.ceil(num_images * votes_percentage / 100))\n\n print(\"Create masks using the salient labels\")\n segmentation_masks = []\n for img, labels in zip(images_list, labels_per_image):\n mask = np.isin(labels, salient_labels).reshape(num_patches, num_patches)\n resized_mask = np.array(Image.fromarray(mask).resize((img_size, img_size), resample=Image.LANCZOS))\n try:\n # apply grabcut on mask\n grabcut_kernel_size = (7, 7)\n kernel = np.ones(grabcut_kernel_size, np.uint8)\n forground_mask = cv2.erode(np.uint8(resized_mask), kernel)\n forground_mask = np.array(Image.fromarray(forground_mask).resize(img.size, Image.NEAREST))\n background_mask = cv2.erode(np.uint8(1 - resized_mask), kernel)\n background_mask = 
np.array(Image.fromarray(background_mask).resize(img.size, Image.NEAREST))\n full_mask = np.ones((load_size[0], load_size[1]), np.uint8) * cv2.GC_PR_FGD\n full_mask[background_mask == 1] = cv2.GC_BGD\n full_mask[forground_mask == 1] = cv2.GC_FGD\n bgdModel = np.zeros((1, 65), np.float64)\n fgdModel = np.zeros((1, 65), np.float64)\n cv2.grabCut(np.array(img), full_mask, None, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)\n grabcut_mask = np.where((full_mask == 2) | (full_mask == 0), 0, 1).astype('uint8')\n except Exception:\n # if mask is unfitted from gb (e.g. all zeros) -- don't apply it\n grabcut_mask = resized_mask.astype('uint8')\n\n grabcut_mask = Image.fromarray(np.array(grabcut_mask, dtype=bool))\n segmentation_masks.append(grabcut_mask)\n\n return segmentation_masks" }, { "identifier": "ViTExtractor", "path": "thirdparty/dino_vit_features/extractor.py", "snippet": "class ViTExtractor:\n \"\"\" This class facilitates extraction of features, descriptors, and saliency maps from a ViT.\n\n We use the following notation in the documentation of the module's methods:\n B - batch size\n h - number of heads. usually takes place of the channel dimension in pytorch's convention BxCxHxW\n p - patch size of the ViT. either 8 or 16.\n t - number of tokens. equals the number of patches + 1, e.g. HW / p**2 + 1. Where H and W are the height and width\n of the input image.\n d - the embedding dimension in the ViT.\n \"\"\"\n\n def __init__(self, model_type: str = 'dino_vits8', stride: int = 4, model: nn.Module = None, device: str = 'cuda'):\n \"\"\"\n :param model_type: A string specifying the type of model to extract from.\n [dino_vits8 | dino_vits16 | dino_vitb8 | dino_vitb16 | vit_small_patch8_224 |\n vit_small_patch16_224 | vit_base_patch8_224 | vit_base_patch16_224]\n :param stride: stride of first convolution layer. small stride -> higher resolution.\n :param model: Optional parameter. The nn.Module to extract from instead of creating a new one in ViTExtractor.\n should be compatible with model_type.\n \"\"\"\n self.model_type = model_type\n self.device = device\n if model is not None:\n self.model = model\n else:\n self.model = ViTExtractor.create_model(model_type)\n\n self.model = ViTExtractor.patch_vit_resolution(self.model, stride=stride)\n self.model.eval()\n self.model.to(self.device)\n patch_size = self.model.patch_embed.patch_size\n if type(patch_size) == tuple:\n patch_size = patch_size[0]\n self.p = patch_size\n self.stride = self.model.patch_embed.proj.stride\n\n self.mean = (0.485, 0.456, 0.406) if \"dino\" in self.model_type else (0.5, 0.5, 0.5)\n self.std = (0.229, 0.224, 0.225) if \"dino\" in self.model_type else (0.5, 0.5, 0.5)\n\n self._feats = []\n self.hook_handlers = []\n self.load_size = None\n self.num_patches = None\n\n @staticmethod\n def create_model(model_type: str) -> nn.Module:\n \"\"\"\n :param model_type: a string specifying which model to load. 
[dino_vits8 | dino_vits16 | dino_vitb8 |\n dino_vitb16 | vit_small_patch8_224 | vit_small_patch16_224 | vit_base_patch8_224 |\n vit_base_patch16_224]\n :return: the model\n \"\"\"\n if 'dinov2' in model_type:\n model = torch.hub.load('facebookresearch/dinov2', model_type)\n elif 'dino' in model_type:\n model = torch.hub.load('facebookresearch/dino:main', model_type)\n else: # model from timm -- load weights from timm to dino model (enables working on arbitrary size images).\n temp_model = timm.create_model(model_type, pretrained=True)\n model_type_dict = {\n 'vit_small_patch16_224': 'dino_vits16',\n 'vit_small_patch8_224': 'dino_vits8',\n 'vit_base_patch16_224': 'dino_vitb16',\n 'vit_base_patch8_224': 'dino_vitb8'\n }\n model = torch.hub.load('facebookresearch/dino:main', model_type_dict[model_type])\n temp_state_dict = temp_model.state_dict()\n del temp_state_dict['head.weight']\n del temp_state_dict['head.bias']\n model.load_state_dict(temp_state_dict)\n return model\n\n @staticmethod\n def _fix_pos_enc(patch_size: int, stride_hw: Tuple[int, int]):\n \"\"\"\n Creates a method for position encoding interpolation.\n :param patch_size: patch size of the model.\n :param stride_hw: A tuple containing the new height and width stride respectively.\n :return: the interpolation method\n \"\"\"\n def interpolate_pos_encoding(self, x: torch.Tensor, w: int, h: int) -> torch.Tensor:\n npatch = x.shape[1] - 1\n N = self.pos_embed.shape[1] - 1\n if npatch == N and w == h:\n return self.pos_embed\n class_pos_embed = self.pos_embed[:, 0]\n patch_pos_embed = self.pos_embed[:, 1:]\n dim = x.shape[-1]\n # compute number of tokens taking stride into account\n w0 = 1 + (w - patch_size) // stride_hw[1]\n h0 = 1 + (h - patch_size) // stride_hw[0]\n assert (w0 * h0 == npatch), f\"\"\"got wrong grid size for {h}x{w} with patch_size {patch_size} and \n stride {stride_hw} got {h0}x{w0}={h0 * w0} expecting {npatch}\"\"\"\n # we add a small number to avoid floating point error in the interpolation\n # see discussion at https://github.com/facebookresearch/dino/issues/8\n w0, h0 = w0 + 0.1, h0 + 0.1\n patch_pos_embed = nn.functional.interpolate(\n patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),\n scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),\n mode='bicubic',\n align_corners=False, recompute_scale_factor=False\n )\n assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]\n patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)\n return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)\n\n return interpolate_pos_encoding\n\n @staticmethod\n def _fix_patch_embed():\n \"\"\"\n Creates a method for position encoding interpolation.\n :param patch_size: patch size of the model.\n :param stride_hw: A tuple containing the new height and width stride respectively.\n :return: the interpolation method\n \"\"\"\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n _, _, H, W = x.shape\n\n ## Remove the assertions\n # patch_H, patch_W = self.patch_size\n # assert H % patch_H == 0, f\"Input image height {H} is not a multiple of patch height {patch_H}\"\n # assert W % patch_W == 0, f\"Input image width {W} is not a multiple of patch width: {patch_W}\"\n\n x = self.proj(x) # B C H W\n H, W = x.size(2), x.size(3)\n x = x.flatten(2).transpose(1, 2) # B HW C\n if hasattr(self, \"norm\"):\n # DINO-v2\n x = self.norm(x)\n if not self.flatten_embedding:\n x = x.reshape(-1, H, W, self.embed_dim) # B H W C\n return x\n else:\n # 
DINO-v1\n return x\n\n return forward\n\n @staticmethod\n def patch_vit_resolution(model: nn.Module, stride: int) -> nn.Module:\n \"\"\"\n change resolution of model output by changing the stride of the patch extraction.\n :param model: the model to change resolution for.\n :param stride: the new stride parameter.\n :return: the adjusted model\n \"\"\"\n patch_size = model.patch_embed.patch_size\n if type(patch_size) == tuple:\n patch_size = patch_size[0]\n if stride == patch_size: # nothing to do\n return model\n\n stride = nn_utils._pair(stride)\n assert all([(patch_size // s_) * s_ == patch_size for s_ in\n stride]), f'stride {stride} should divide patch_size {patch_size}'\n\n # fix the stride\n model.patch_embed.proj.stride = stride\n # fix the positional encoding code\n model.interpolate_pos_encoding = types.MethodType(ViTExtractor._fix_pos_enc(patch_size, stride), model)\n # fix the patch embedding\n model.patch_embed.forward = types.MethodType(ViTExtractor._fix_patch_embed(), model.patch_embed)\n return model\n\n def preprocess(self, image_path: Union[str, Path],\n load_size: Union[int, Tuple[int, int]] = None) -> Tuple[torch.Tensor, Image.Image]:\n \"\"\"\n Preprocesses an image before extraction.\n :param image_path: path to image to be extracted.\n :param load_size: optional. Size to resize image before the rest of preprocessing.\n :return: a tuple containing:\n (1) the preprocessed image as a tensor to insert the model of shape BxCxHxW.\n (2) the pil image in relevant dimensions\n \"\"\"\n pil_image = Image.open(image_path).convert('RGB')\n if load_size is not None:\n pil_image = transforms.Resize(load_size, interpolation=transforms.InterpolationMode.LANCZOS)(pil_image)\n prep = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=self.mean, std=self.std)\n ])\n prep_img = prep(pil_image)[None, ...]\n return prep_img, pil_image\n\n def _get_hook(self, facet: str):\n \"\"\"\n generate a hook method for a specific block and facet.\n \"\"\"\n if facet in ['attn', 'token']:\n def _hook(model, input, output):\n self._feats.append(output)\n return _hook\n\n if facet == 'query':\n facet_idx = 0\n elif facet == 'key':\n facet_idx = 1\n elif facet == 'value':\n facet_idx = 2\n else:\n raise TypeError(f\"{facet} is not a supported facet.\")\n\n def _inner_hook(module, input, output):\n input = input[0]\n B, N, C = input.shape\n qkv = module.qkv(input).reshape(B, N, 3, module.num_heads, C // module.num_heads).permute(2, 0, 3, 1, 4)\n self._feats.append(qkv[facet_idx]) #Bxhxtxd\n return _inner_hook\n\n def _register_hooks(self, layers: List[int], facet: str) -> None:\n \"\"\"\n register hook to extract features.\n :param layers: layers from which to extract features.\n :param facet: facet to extract. One of the following options: ['key' | 'query' | 'value' | 'token' | 'attn']\n \"\"\"\n for block_idx, block in enumerate(self.model.blocks):\n if block_idx in layers:\n if facet == 'token':\n self.hook_handlers.append(block.register_forward_hook(self._get_hook(facet)))\n elif facet == 'attn':\n self.hook_handlers.append(block.attn.attn_drop.register_forward_hook(self._get_hook(facet)))\n elif facet in ['key', 'query', 'value']:\n self.hook_handlers.append(block.attn.register_forward_hook(self._get_hook(facet)))\n else:\n raise TypeError(f\"{facet} is not a supported facet.\")\n\n def _unregister_hooks(self) -> None:\n \"\"\"\n unregisters the hooks. 
should be called after feature extraction.\n \"\"\"\n for handle in self.hook_handlers:\n handle.remove()\n self.hook_handlers = []\n\n def _extract_features(self, batch: torch.Tensor, layers: List[int] = 11, facet: str = 'key') -> List[torch.Tensor]:\n \"\"\"\n extract features from the model\n :param batch: batch to extract features for. Has shape BxCxHxW.\n :param layers: layer to extract. A number between 0 to 11.\n :param facet: facet to extract. One of the following options: ['key' | 'query' | 'value' | 'token' | 'attn']\n :return : tensor of features.\n if facet is 'key' | 'query' | 'value' has shape Bxhxtxd\n if facet is 'attn' has shape Bxhxtxt\n if facet is 'token' has shape Bxtxd\n \"\"\"\n B, C, H, W = batch.shape\n self._feats = []\n self._register_hooks(layers, facet)\n _ = self.model(batch)\n self._unregister_hooks()\n self.load_size = (H, W)\n self.num_patches = (1 + (H - self.p) // self.stride[0], 1 + (W - self.p) // self.stride[1])\n return self._feats\n\n def get_num_patches(self, H, W):\n return (1 + (H - self.p) // self.stride[0], 1 + (W - self.p) // self.stride[1])\n\n def _log_bin(self, x: torch.Tensor, hierarchy: int = 2) -> torch.Tensor:\n \"\"\"\n create a log-binned descriptor.\n :param x: tensor of features. Has shape Bxhxtxd.\n :param hierarchy: how many bin hierarchies to use.\n \"\"\"\n B = x.shape[0]\n num_bins = 1 + 8 * hierarchy\n\n bin_x = x.permute(0, 2, 3, 1).flatten(start_dim=-2, end_dim=-1) # Bx(t-1)x(dxh)\n bin_x = bin_x.permute(0, 2, 1)\n bin_x = bin_x.reshape(B, bin_x.shape[1], self.num_patches[0], self.num_patches[1])\n # Bx(dxh)xnum_patches[0]xnum_patches[1]\n sub_desc_dim = bin_x.shape[1]\n\n avg_pools = []\n # compute bins of all sizes for all spatial locations.\n for k in range(0, hierarchy):\n # avg pooling with kernel 3**kx3**k\n win_size = 3 ** k\n avg_pool = torch.nn.AvgPool2d(win_size, stride=1, padding=win_size // 2, count_include_pad=False)\n avg_pools.append(avg_pool(bin_x))\n\n bin_x = torch.zeros((B, sub_desc_dim * num_bins, self.num_patches[0], self.num_patches[1])).to(self.device)\n for y in range(self.num_patches[0]):\n for x in range(self.num_patches[1]):\n part_idx = 0\n # fill all bins for a spatial location (y, x)\n for k in range(0, hierarchy):\n kernel_size = 3 ** k\n for i in range(y - kernel_size, y + kernel_size + 1, kernel_size):\n for j in range(x - kernel_size, x + kernel_size + 1, kernel_size):\n if i == y and j == x and k != 0:\n continue\n if 0 <= i < self.num_patches[0] and 0 <= j < self.num_patches[1]:\n bin_x[:, part_idx * sub_desc_dim: (part_idx + 1) * sub_desc_dim, y, x] = avg_pools[k][\n :, :, i, j]\n else: # handle padding in a more delicate way than zero padding\n temp_i = max(0, min(i, self.num_patches[0] - 1))\n temp_j = max(0, min(j, self.num_patches[1] - 1))\n bin_x[:, part_idx * sub_desc_dim: (part_idx + 1) * sub_desc_dim, y, x] = avg_pools[k][\n :, :, temp_i,\n temp_j]\n part_idx += 1\n bin_x = bin_x.flatten(start_dim=-2, end_dim=-1).permute(0, 2, 1).unsqueeze(dim=1)\n # Bx1x(t-1)x(dxh)\n return bin_x\n\n def extract_descriptors(self, batch: torch.Tensor, layer: int = 11, facet: str = 'key',\n bin: bool = False, include_cls: bool = False) -> torch.Tensor:\n \"\"\"\n extract descriptors from the model\n :param batch: batch to extract descriptors for. Has shape BxCxHxW.\n :param layers: layer to extract. A number between 0 to 11.\n :param facet: facet to extract. One of the following options: ['key' | 'query' | 'value' | 'token']\n :param bin: apply log binning to the descriptor. 
default is False.\n :return: tensor of descriptors. Bx1xtxd' where d' is the dimension of the descriptors.\n \"\"\"\n assert facet in ['key', 'query', 'value', 'token'], f\"\"\"{facet} is not a supported facet for descriptors. \n choose from ['key' | 'query' | 'value' | 'token'] \"\"\"\n self._extract_features(batch, [layer], facet)\n x = self._feats[0]\n if facet == 'token':\n x.unsqueeze_(dim=1) #Bx1xtxd\n if not include_cls:\n x = x[:, :, 1:, :] # remove cls token\n else:\n assert not bin, \"bin = True and include_cls = True are not supported together, set one of them False.\"\n if not bin:\n desc = x.permute(0, 2, 3, 1).flatten(start_dim=-2, end_dim=-1).unsqueeze(dim=1) # Bx1xtx(dxh)\n else:\n desc = self._log_bin(x)\n return desc\n\n def extract_saliency_maps(self, batch: torch.Tensor) -> torch.Tensor:\n \"\"\"\n extract saliency maps. The saliency maps are extracted by averaging several attention heads from the last layer\n in of the CLS token. All values are then normalized to range between 0 and 1.\n :param batch: batch to extract saliency maps for. Has shape BxCxHxW.\n :return: a tensor of saliency maps. has shape Bxt-1\n \"\"\"\n # assert self.model_type == \"dino_vits8\", f\"saliency maps are supported only for dino_vits model_type.\"\n self._extract_features(batch, [11], 'attn')\n head_idxs = [0, 2, 4, 5]\n curr_feats = self._feats[0] #Bxhxtxt\n cls_attn_map = curr_feats[:, head_idxs, 0, 1:].mean(dim=1) #Bx(t-1)\n temp_mins, temp_maxs = cls_attn_map.min(dim=1)[0], cls_attn_map.max(dim=1)[0]\n cls_attn_maps = (cls_attn_map - temp_mins) / (temp_maxs - temp_mins) # normalize to range [0,1]\n return cls_attn_maps" }, { "identifier": "corrs_from_feat", "path": "thirdparty/dino_vit_features/correspondences.py", "snippet": "def corrs_from_feat(\n descriptors1, descriptors2, saliency_map1, saliency_map2,\n num_patches, stride, p, device, mask1=None, mask2=None):\n\n # calculate similarity between image1 and image2 descriptors\n # similarities = chunk_cosine_sim(descriptors1.unsqueeze(0).unsqueeze(0),\n # descriptors2.unsqueeze(0).unsqueeze(0))\n similarities = F.normalize(descriptors1, dim=1) @ F.normalize(descriptors2, dim=1).T\n\n # calculate best buddies\n image_idxs = torch.arange(num_patches * num_patches, device=device)\n # nn_1 - indices of block2 closest to block1\n sim_1, nn_1 = torch.max(similarities, dim=-1)\n # nn_2 - indices of block1 closest to block2\n sim_2, nn_2 = torch.max(similarities, dim=-2)\n # sim_1, nn_1 = sim_1[0, 0], nn_1[0, 0]\n # sim_2, nn_2 = sim_2[0, 0], nn_2[0, 0]\n bbs_mask = nn_2[nn_1] == image_idxs\n\n # remove best buddies where at least one descriptor is marked bg\n # by saliency mask\n if mask1 is not None and mask2 is not None:\n mask1 = mask1.resize((num_patches, num_patches), resample=Image.LANCZOS)\n mask1 = torch.from_numpy(np.array(mask1).reshape(-1)).to(device)>0\n\n mask2 = mask2.resize((num_patches, num_patches), resample=Image.LANCZOS)\n mask2 = torch.from_numpy(np.array(mask2).reshape(-1)).to(device)>0\n\n fg_mask2_new_coors = nn_2[mask2]\n fg_mask2_mask_new_coors = torch.zeros(num_patches * num_patches, dtype=torch.bool, device=device)\n fg_mask2_mask_new_coors[fg_mask2_new_coors] = True\n bbs_mask = torch.bitwise_and(bbs_mask, mask1)\n bbs_mask = torch.bitwise_and(bbs_mask, fg_mask2_mask_new_coors)\n bbs_mask = bbs_mask.cpu().numpy()\n\n # rank pairs by their mean saliency value\n bbs_mask_idx1 = np.where(bbs_mask)[0]\n bbs_mask_idx2 = nn_1[bbs_mask].cpu().numpy()\n bb_cls_attn1 = saliency_map1[bbs_mask_idx1]\n bb_cls_attn2 = 
saliency_map2[bbs_mask_idx2]\n bb_cls_attn = (bb_cls_attn1 + bb_cls_attn2) / 2\n ranks_sal = bb_cls_attn\n ranks_sim = sim_1[bbs_mask_idx1]\n\n # get coordinates to show\n img1_bb = torch.arange(\n num_patches * num_patches, device=device)[bbs_mask_idx1]\n img2_bb = nn_1[img1_bb]\n # coordinates in descriptor map's dimensions\n img1_bb_y = (img1_bb / num_patches).long()\n img1_bb_x = (img1_bb % num_patches)\n img2_bb_y = (img2_bb / num_patches).long()\n img2_bb_x = (img2_bb % num_patches)\n pt1 = torch.zeros(len(img1_bb), 2, dtype=torch.long)\n pt2 = torch.zeros(len(img1_bb), 2, dtype=torch.long)\n pt1[:, 1] = (img1_bb_y - 1) * stride + stride + p // 2\n pt1[:, 0] = (img1_bb_x - 1) * stride + stride + p // 2\n pt2[:, 1] = (img2_bb_y - 1) * stride + stride + p // 2\n pt2[:, 0] = (img2_bb_x - 1) * stride + stride + p // 2\n return pt1, pt2, bbs_mask_idx1, bbs_mask_idx2, ranks_sal, ranks_sim" }, { "identifier": "parts_from_feat", "path": "thirdparty/dino_vit_features/part_cosegmentation.py", "snippet": "def parts_from_feat(extractor, layer, facet, bin, transform,\n descriptors_list, saliency_maps_list, images_list, image_paths,\n load_size, device, elbow: float = 0.975, thresh: float = 0.065,\n votes_percentage: int = 75, sample_interval: int = 100,\n num_parts: int = 4, num_crop_augmentations: int = 0,\n three_stages: bool = False, elbow_second_stage: float = 0.94,\n save_dir:Path = Path('')):\n\n new_images_list, aug_descriptors_list = \\\n create_augmentations(\n extractor, layer, facet, bin, transform, images_list,\n load_size, num_crop_augmentations, device)\n\n images_list += new_images_list\n descriptors_list += aug_descriptors_list\n # saliency_maps_list += aug_saliency_maps_list\n num_patches = extractor.get_num_patches(load_size, load_size)[0]\n num_images = len(image_paths)\n\n print(\"Clustering all images using k-means\")\n all_descriptors = np.ascontiguousarray(np.concatenate(descriptors_list, axis=0))\n normalized_all_descriptors = all_descriptors.astype(np.float32)\n faiss.normalize_L2(normalized_all_descriptors) # in-place operation\n sampled_descriptors_list = [x[::sample_interval, :] for x in descriptors_list]\n all_sampled_descriptors = np.ascontiguousarray(np.concatenate(sampled_descriptors_list, axis=0))\n normalized_all_sampled_descriptors = all_sampled_descriptors.astype(np.float32)\n faiss.normalize_L2(normalized_all_sampled_descriptors) # in-place operation\n\n sum_of_squared_dists = []\n n_cluster_range = list(range(1, 15))\n for n_clusters in n_cluster_range:\n algorithm = faiss.Kmeans(d=normalized_all_sampled_descriptors.shape[1], k=n_clusters, niter=300, nredo=10)\n algorithm.train(normalized_all_sampled_descriptors.astype(np.float32))\n squared_distances, labels = algorithm.index.search(normalized_all_descriptors.astype(np.float32), 1)\n objective = squared_distances.sum()\n sum_of_squared_dists.append(objective / normalized_all_descriptors.shape[0])\n if (len(sum_of_squared_dists) > 1 and sum_of_squared_dists[-1] > elbow * sum_of_squared_dists[-2]):\n break\n\n num_labels = np.max(n_clusters) + 1\n num_descriptors_per_image = [num_patches*num_patches] * len(images_list)\n labels_per_image = np.split(labels, np.cumsum(num_descriptors_per_image)[:-1])\n\n if save_dir is not None:\n cmap = 'jet' if num_labels > 10 else 'tab10'\n for path, label_per_image in zip(image_paths, labels_per_image):\n fname = save_dir / f'parts_s1_num{num_labels}_{Path(path).stem}.npy'\n np.save(fname, label_per_image.reshape(num_patches, num_patches))\n fname = save_dir / 
f'parts_s1_num{num_labels}_{Path(path).stem}.png'\n fig, ax = plt.subplots()\n ax.axis('off')\n ax.imshow(label_per_image.reshape((num_patches, num_patches)), vmin=0,\n vmax=num_labels-1, cmap=cmap)\n fig.savefig(fname, bbox_inches='tight', pad_inches=0)\n plt.close(fig)\n\n print(\"Using saliency maps to vote for salient clusters (only original images vote, not augmentations\")\n votes = np.zeros(num_labels)\n # for image, image_labels, saliency_map in zip(images_list, labels_per_image, saliency_maps_list):\n for img_idx in range(num_images):\n for label in range(num_labels):\n label_saliency = saliency_maps_list[img_idx][labels_per_image[img_idx][:, 0] == label].mean()\n if label_saliency > thresh:\n votes[label] += 1\n salient_labels = np.where(votes >= np.ceil(num_images * votes_percentage / 100))[0]\n\n print(\"Clustering all parts using k-means\")\n fg_masks = [np.isin(labels, salient_labels) for labels in labels_per_image] # get only foreground descriptors\n fg_descriptor_list = [desc[fg_mask[:, 0], :] for fg_mask, desc in zip(fg_masks, descriptors_list)]\n all_fg_descriptors = np.ascontiguousarray(np.concatenate(fg_descriptor_list, axis=0))\n normalized_all_fg_descriptors = all_fg_descriptors.astype(np.float32)\n faiss.normalize_L2(normalized_all_fg_descriptors) # in-place operation\n sampled_fg_descriptors_list = [x[::sample_interval, :] for x in fg_descriptor_list]\n all_fg_sampled_descriptors = np.ascontiguousarray(np.concatenate(sampled_fg_descriptors_list, axis=0))\n normalized_all_fg_sampled_descriptors = all_fg_sampled_descriptors.astype(np.float32)\n faiss.normalize_L2(normalized_all_fg_sampled_descriptors) # in-place operation\n\n sum_of_squared_dists = []\n # if applying three stages, use elbow to determine number of clusters in second stage, otherwise use the specified\n # number of parts.\n n_cluster_range = list(range(1, 15)) if three_stages else [num_parts]\n for n_clusters in n_cluster_range:\n part_algorithm = faiss.Kmeans(d=normalized_all_fg_sampled_descriptors.shape[1], k=n_clusters, niter=300, nredo=10)\n part_algorithm.train(normalized_all_fg_sampled_descriptors.astype(np.float32))\n squared_distances, part_labels = part_algorithm.index.search(normalized_all_fg_descriptors.astype(np.float32), 1)\n objective = squared_distances.sum()\n sum_of_squared_dists.append(objective / normalized_all_fg_descriptors.shape[0])\n if (len(sum_of_squared_dists) > 1 and sum_of_squared_dists[-1] > elbow_second_stage * sum_of_squared_dists[-2]):\n break\n\n part_num_labels = np.max(part_labels) + 1\n # parts_num_descriptors_per_image = [np.count_nonzero(mask) for mask in fg_masks]\n # part_labels_per_image = np.split(part_labels, np.cumsum(parts_num_descriptors_per_image))\n\n print(\"Get smoothed parts using crf\")\n part_segmentations = []\n for img, descs in zip(images_list, descriptors_list):\n bg_centroids = tuple(i for i in range(algorithm.centroids.shape[0]) if not i in salient_labels)\n curr_normalized_descs = descs.astype(np.float32)\n faiss.normalize_L2(curr_normalized_descs) # in-place operation\n # distance to parts\n dist_to_parts = ((curr_normalized_descs[:, None, :] - part_algorithm.centroids[None, ...]) ** 2\n ).sum(axis=2)\n # dist to BG\n dist_to_bg = ((curr_normalized_descs[:, None, :] - algorithm.centroids[None, bg_centroids, :]) ** 2\n ).sum(axis=2)\n min_dist_to_bg = np.min(dist_to_bg, axis=1)[:, None]\n d_to_cent = np.concatenate((dist_to_parts, min_dist_to_bg), axis=1).reshape(num_patches, num_patches,\n part_num_labels + 1)\n d_to_cent = d_to_cent - 
np.max(d_to_cent, axis=-1)[..., None]\n upsample = torch.nn.Upsample(size=(load_size, load_size))\n u = np.array(upsample(torch.from_numpy(d_to_cent).permute(2, 0, 1)[None, ...])[0].permute(1, 2, 0))\n d = dcrf.DenseCRF2D(u.shape[1], u.shape[0], u.shape[2])\n d.setUnaryEnergy(np.ascontiguousarray(u.reshape(-1, u.shape[-1]).T))\n compat = [50, 15]\n d.addPairwiseGaussian(sxy=(3, 3), compat=compat[0], kernel=dcrf.DIAG_KERNEL,\n normalization=dcrf.NORMALIZE_SYMMETRIC)\n d.addPairwiseBilateral(sxy=5, srgb=13, rgbim=np.ascontiguousarray(img),\n compat=compat[1], kernel=dcrf.DIAG_KERNEL,\n normalization=dcrf.NORMALIZE_SYMMETRIC)\n Q = d.inference(10)\n final = np.argmax(Q, axis=0).reshape((load_size, load_size))\n parts_float = final.astype(np.float32)\n parts_float[parts_float == part_num_labels] = np.nan\n part_segmentations.append(parts_float)\n\n if save_dir is not None:\n part_figs = draw_part_cosegmentation(part_num_labels, part_segmentations[:num_images], images_list[:num_images])\n for path, part_fig, segmentation in zip(image_paths, part_figs, part_segmentations):\n fname = save_dir / f'parts_s2_{Path(path).stem}.npy'\n np.save(fname, segmentation)\n fname = save_dir / f'parts_s2_{Path(path).stem}.png'\n part_fig.savefig(fname, bbox_inches='tight', pad_inches=0)\n plt.close('all')\n\n if three_stages: # if needed, apply third stage\n print(\"Applying third stage\")\n\n # get labels after crf for each descriptor\n smoothed_part_labels_per_image = []\n for part_segment in part_segmentations:\n resized_part_segment = np.array(F.interpolate(\n torch.from_numpy(part_segment)[None, None, ...],\n size=num_patches, mode='nearest')[0, 0])\n smoothed_part_labels_per_image.append(resized_part_segment.flatten())\n\n # take only parts that appear in all original images (otherwise they belong to non-common objects)\n votes = np.zeros(part_num_labels)\n # for _, image_labels in zip(images_list, smoothed_part_labels_per_image):\n for img_idx in range(num_images):\n image_labels = smoothed_part_labels_per_image[img_idx]\n unique_labels = np.unique(image_labels[~np.isnan(image_labels)]).astype(np.int32)\n votes[unique_labels] += 1\n common_labels = np.where(votes == num_images)[0]\n\n # get labels after crf for each descriptor\n common_parts_masks = []\n for part_segment in smoothed_part_labels_per_image:\n common_parts_masks.append(np.isin(part_segment, common_labels).flatten())\n\n # cluster all final parts using k-means:\n common_descriptor_list = [desc[mask, :] for mask, desc in zip(common_parts_masks, descriptors_list)]\n all_common_descriptors = np.ascontiguousarray(np.concatenate(common_descriptor_list, axis=0))\n normalized_all_common_descriptors = all_common_descriptors.astype(np.float32)\n faiss.normalize_L2(normalized_all_common_descriptors) # in-place operation\n sampled_common_descriptors_list = [x[::sample_interval, :] for x in common_descriptor_list]\n all_common_sampled_descriptors = np.ascontiguousarray(np.concatenate(sampled_common_descriptors_list,\n axis=0))\n normalized_all_common_sampled_descriptors = all_common_sampled_descriptors.astype(np.float32)\n faiss.normalize_L2(normalized_all_common_sampled_descriptors) # in-place operation\n if num_parts is None or num_parts <=0:\n num_parts = part_num_labels\n common_part_algorithm = faiss.Kmeans(\n d=normalized_all_common_sampled_descriptors.shape[1], k=int(num_parts),\n niter=300, nredo=10)\n common_part_algorithm.train(normalized_all_common_sampled_descriptors)\n # _, common_part_labels = 
common_part_algorithm.index.search(normalized_all_common_descriptors.astype(np.float32), 1)\n\n # common_part_num_labels = np.max(common_part_labels) + 1\n # parts_num_descriptors_per_image = [np.count_nonzero(mask) for mask in common_parts_masks]\n # common_part_labels_per_image = np.split(common_part_labels, np.cumsum(parts_num_descriptors_per_image))\n # get smoothed parts using crf\n common_part_segmentations = []\n for _, img, descs in zip(image_paths, images_list, descriptors_list):\n bg_centroids_1 = tuple(i for i in range(algorithm.centroids.shape[0]) if not i in salient_labels)\n bg_centroids_2 = tuple(i for i in range(part_algorithm.centroids.shape[0]) if not i in common_labels)\n curr_normalized_descs = descs.astype(np.float32)\n faiss.normalize_L2(curr_normalized_descs) # in-place operation\n\n # distance to parts\n dist_to_parts = ((curr_normalized_descs[:, None, :] - common_part_algorithm.centroids[None, ...]) ** 2).sum(\n axis=2)\n # dist to BG\n dist_to_bg_1 = ((curr_normalized_descs[:, None, :] -\n algorithm.centroids[None, bg_centroids_1, :]) ** 2).sum(axis=2)\n dist_to_bg_2 = ((curr_normalized_descs[:, None, :] -\n part_algorithm.centroids[None, bg_centroids_2, :]) ** 2).sum(axis=2)\n dist_to_bg = np.concatenate((dist_to_bg_1, dist_to_bg_2), axis=1)\n min_dist_to_bg = np.min(dist_to_bg, axis=1)[:, None]\n d_to_cent = np.concatenate((dist_to_parts, min_dist_to_bg), axis=1).reshape(num_patches, num_patches,\n num_parts + 1)\n d_to_cent = d_to_cent - np.max(d_to_cent, axis=-1)[..., None]\n upsample = torch.nn.Upsample(size=(load_size, load_size))\n u = np.array(upsample(torch.from_numpy(d_to_cent).permute(2, 0, 1)[None, ...])[0].permute(1, 2, 0))\n d = dcrf.DenseCRF2D(u.shape[1], u.shape[0], u.shape[2])\n d.setUnaryEnergy(np.ascontiguousarray(u.reshape(-1, u.shape[-1]).T))\n\n compat = [50, 15]\n d.addPairwiseGaussian(sxy=(3, 3), compat=compat[0], kernel=dcrf.DIAG_KERNEL,\n normalization=dcrf.NORMALIZE_SYMMETRIC)\n d.addPairwiseBilateral(sxy=5, srgb=13, rgbim=np.array(img), compat=compat[1], kernel=dcrf.DIAG_KERNEL,\n normalization=dcrf.NORMALIZE_SYMMETRIC)\n Q = d.inference(10)\n final = np.argmax(Q, axis=0).reshape((load_size, load_size))\n common_parts_float = final.astype(np.float32)\n common_parts_float[common_parts_float == num_parts] = np.nan\n common_part_segmentations.append(common_parts_float)\n\n # reassign third stage results as final results\n part_segmentations = common_part_segmentations\n\n if save_dir is not None:\n part_figs = draw_part_cosegmentation(part_num_labels, part_segmentations[:num_images], images_list[:num_images])\n for path, part_fig, segmentation in zip(image_paths, part_figs, part_segmentations):\n fname = save_dir / f'parts_s3_{Path(path).stem}.npy'\n np.save(fname, segmentation)\n fname = save_dir / f'parts_s3_{Path(path).stem}.png'\n part_fig.savefig(fname, bbox_inches='tight', pad_inches=0)\n plt.close('all')\n\n return part_segmentations" }, { "identifier": "ISNetDIS", "path": "thirdparty/DIS/isnet.py", "snippet": "class ISNetDIS(nn.Module):\n\n def __init__(self,in_ch=3,out_ch=1):\n super(ISNetDIS,self).__init__()\n\n self.conv_in = nn.Conv2d(in_ch,64,3,stride=2,padding=1)\n self.pool_in = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage1 = RSU7(64,32,64)\n self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage2 = RSU6(64,32,128)\n self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage3 = RSU5(128,64,256)\n self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage4 = RSU4(256,128,512)\n 
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage5 = RSU4F(512,256,512)\n self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage6 = RSU4F(512,256,512)\n\n # decoder\n self.stage5d = RSU4F(1024,256,512)\n self.stage4d = RSU4(1024,128,256)\n self.stage3d = RSU5(512,64,128)\n self.stage2d = RSU6(256,32,64)\n self.stage1d = RSU7(128,16,64)\n\n self.side1 = nn.Conv2d(64,out_ch,3,padding=1)\n self.side2 = nn.Conv2d(64,out_ch,3,padding=1)\n self.side3 = nn.Conv2d(128,out_ch,3,padding=1)\n self.side4 = nn.Conv2d(256,out_ch,3,padding=1)\n self.side5 = nn.Conv2d(512,out_ch,3,padding=1)\n self.side6 = nn.Conv2d(512,out_ch,3,padding=1)\n\n # self.outconv = nn.Conv2d(6*out_ch,out_ch,1)\n\n def compute_loss_kl(self, preds, targets, dfs, fs, mode='MSE'):\n\n # return muti_loss_fusion(preds,targets)\n return muti_loss_fusion_kl(preds, targets, dfs, fs, mode=mode)\n\n def compute_loss(self, preds, targets):\n\n # return muti_loss_fusion(preds,targets)\n return muti_loss_fusion(preds, targets)\n\n def forward(self,x):\n\n hx = x\n\n hxin = self.conv_in(hx)\n #hx = self.pool_in(hxin)\n\n #stage 1\n hx1 = self.stage1(hxin)\n hx = self.pool12(hx1)\n\n #stage 2\n hx2 = self.stage2(hx)\n hx = self.pool23(hx2)\n\n #stage 3\n hx3 = self.stage3(hx)\n hx = self.pool34(hx3)\n\n #stage 4\n hx4 = self.stage4(hx)\n hx = self.pool45(hx4)\n\n #stage 5\n hx5 = self.stage5(hx)\n hx = self.pool56(hx5)\n\n #stage 6\n hx6 = self.stage6(hx)\n hx6up = _upsample_like(hx6,hx5)\n\n #-------------------- decoder --------------------\n hx5d = self.stage5d(torch.cat((hx6up,hx5),1))\n hx5dup = _upsample_like(hx5d,hx4)\n\n hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))\n hx4dup = _upsample_like(hx4d,hx3)\n\n hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))\n hx3dup = _upsample_like(hx3d,hx2)\n\n hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))\n hx2dup = _upsample_like(hx2d,hx1)\n\n hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))\n\n\n #side output\n d1 = self.side1(hx1d)\n d1 = _upsample_like(d1,x)\n\n d2 = self.side2(hx2d)\n d2 = _upsample_like(d2,x)\n\n d3 = self.side3(hx3d)\n d3 = _upsample_like(d3,x)\n\n d4 = self.side4(hx4d)\n d4 = _upsample_like(d4,x)\n\n d5 = self.side5(hx5d)\n d5 = _upsample_like(d5,x)\n\n d6 = self.side6(hx6)\n d6 = _upsample_like(d6,x)\n\n # d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))\n\n return [F.sigmoid(d1), F.sigmoid(d2), F.sigmoid(d3), F.sigmoid(d4), F.sigmoid(d5), F.sigmoid(d6)],[hx1d,hx2d,hx3d,hx4d,hx5d,hx6]" } ]
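The corrs_from_feat snippet above keeps only mutual nearest neighbours ("best buddies") between the two descriptor sets before ranking them by saliency and similarity. The following is a minimal sketch of just that matching step, written against generic descriptor matrices; the function name and shapes are illustrative and not taken from the repository.

import torch
import torch.nn.functional as F

def best_buddies(desc1: torch.Tensor, desc2: torch.Tensor):
    """Mutual nearest-neighbour matching under cosine similarity.

    desc1: (N1, D), desc2: (N2, D). Returns (i, j, sim) such that desc2[j] is the
    best match of desc1[i] and desc1[i] is in turn the best match of desc2[j].
    """
    sim = F.normalize(desc1, dim=1) @ F.normalize(desc2, dim=1).T  # (N1, N2)
    sim_12, nn_12 = sim.max(dim=1)      # best column for every row
    _, nn_21 = sim.max(dim=0)           # best row for every column
    idx = torch.arange(desc1.shape[0])
    mutual = nn_21[nn_12] == idx        # i -> j -> back to the same i
    return idx[mutual], nn_12[mutual], sim_12[mutual]

if __name__ == "__main__":
    d1, d2 = torch.randn(50, 384), torch.randn(60, 384)
    i, j, s = best_buddies(d1, d2)
    print(i.shape, j.shape, s.shape)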
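parts_from_feat grows the number of k-means clusters one at a time and stops as soon as the per-descriptor objective no longer improves by more than the elbow factor. A small stand-alone version of that stopping rule is sketched below; it uses sklearn's KMeans purely as a stand-in for the faiss.Kmeans calls in the snippet, and the data is random.

import numpy as np
from sklearn.cluster import KMeans  # stand-in for the faiss.Kmeans used above

def elbow_kmeans(x: np.ndarray, elbow: float = 0.975, k_max: int = 14) -> KMeans:
    """Increase k until the mean squared distance to the nearest centroid stops
    improving by more than a factor of `elbow` (the original keeps the model
    from the iteration that triggers the stop; the difference is cosmetic)."""
    best, prev_obj = None, None
    for k in range(1, k_max + 1):
        km = KMeans(n_clusters=k, n_init=10, random_state=0).fit(x)
        obj = km.inertia_ / len(x)              # average squared distance
        if prev_obj is not None and obj > elbow * prev_obj:
            break                               # an extra cluster no longer pays off
        best, prev_obj = km, obj
    return best

if __name__ == "__main__":
    pts = np.random.rand(1000, 16).astype(np.float32)
    print(elbow_kmeans(pts).n_clusters)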
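Two small details of the ViTExtractor snippet are easy to get wrong downstream: the size of the patch grid once the stride has been reduced, and the per-image min-max normalisation applied to the CLS attention before it is used as a saliency map. The quick checks below restate both; the image size and token counts are illustrative only.

import torch

def patch_grid(h: int, w: int, patch_size: int, stride: int):
    # same arithmetic as ViTExtractor.get_num_patches: one patch every `stride`
    # pixels, so each axis yields 1 + (dim - patch_size) // stride positions
    return 1 + (h - patch_size) // stride, 1 + (w - patch_size) // stride

assert patch_grid(224, 224, patch_size=8, stride=4) == (55, 55)

def minmax_per_image(attn: torch.Tensor) -> torch.Tensor:
    # each row is one image's CLS attention over its patches; rescale to [0, 1]
    # as extract_saliency_maps does before the maps are written to disk
    lo = attn.min(dim=1, keepdim=True).values
    hi = attn.max(dim=1, keepdim=True).values
    return (attn - lo) / (hi - lo)

print(minmax_per_image(torch.rand(2, 55 * 55)).shape)  # torch.Size([2, 3025])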
import argparse import numpy as np import torch import torch.nn.functional as F from pathlib import Path from PIL import Image from tqdm import tqdm from torchvision import transforms from torchvision.datasets.utils import download_file_from_google_drive from torchvision.utils import save_image from datasets.cub import CUBDataset from datasets.spair import SpairDataset from models.utils import sample_from_reverse_flow from commons.utils import str2bool from commons.draw import draw_kps, get_dense_colors, splat_points, load_fg_points from thirdparty.MLS.mls import mls_rigid_deformation from commons.utils import map_minmax from thirdparty.dino_vit_features.cosegmentation import coseg_from_feat from thirdparty.dino_vit_features.extractor import ViTExtractor from thirdparty.dino_vit_features.correspondences import corrs_from_feat from thirdparty.dino_vit_features.part_cosegmentation import parts_from_feat from thirdparty.DIS.isnet import ISNetDIS
21,275
mask2 = masks_list[j] saliency_map2 = saliency_maps_list[j] fname = matches_dir / f'{fname1}_{fname2}.npy' if fname.exists(): continue pt1, pt2, pt1_idx, pt2_idx, ranks_sal, ranks_sim = corrs_from_feat( feat1, feat2, saliency_map1, saliency_map2, num_patches, extractor.stride[0], extractor.p, device, mask1, mask2) # Save the output fname = matches_dir / f'{fname1}_{fname2}.npy' d = { 'kp1': pt1.cpu().numpy().astype(np.int32), 'kp2': pt2.cpu().numpy().astype(np.int32), 'kp1_idx': pt1_idx, 'kp2_idx': pt2_idx, 'ranks_attn': ranks_sal, 'ranks_sim': ranks_sim.cpu().numpy(), } np.save(fname, d) # Save sparse correspondences colors = get_dense_colors(pt1, img_size) colors = colors.to(device).unsqueeze(0).expand(2, -1, -1) sparse_corrs = splat_points( torch.stack([img1, img2], dim=0), torch.stack([pt1, pt2]).float().to(device), sigma=2., opacity=1.0, colors=map_minmax(colors, 0, 1, -1, 1)) fname = matches_dir / f'{fname1}_{fname2}.jpg' save_image(sparse_corrs, fname, normalize=True, padding=2, pad_value=1) @torch.no_grad() def save_mls(extractor, dset, out_dir, img_size, transform, device, layer=9, facet='key', bin=False, mls_num=None, mls_alpha=1.): print("Converting NBB to MLS for all pairs of images") _, descriptors_list, saliency_maps_list = \ extract_features_and_saliency_maps( extractor, img_size, layer, facet, bin, transform, dset, out_dir, device) image_paths = dset.files num_patches = extractor.get_num_patches(img_size, img_size)[0] masks_dir = out_dir / 'masks' masks_list = [] for i in tqdm(range(len(dset))): fname = Path(image_paths[i]).stem mask_fname = masks_dir / f'{fname}.png' mask = Image.open(mask_fname).convert('L') masks_list.append(mask) matches_dir = out_dir / f'nbb' matches_dir.mkdir(exist_ok=True, parents=True) descriptors_list = torch.stack([ torch.from_numpy(descriptor).to(device) for descriptor in descriptors_list ]) if mls_num is not None: flow_dir = out_dir / f'mls_num{mls_num}_alpha{mls_alpha}' else: flow_dir = out_dir / f'mls_alpha{mls_alpha}' flow_dir.mkdir(exist_ok=True, parents=True) for i in tqdm(range(len(dset)-1)): img1 = dset.imgs[i].to(device) fname1 = Path(image_paths[i]).stem mask1 = masks_list[i] mask1 = torch.from_numpy(np.array(mask1)>0).to(device) for j in range(i+1, len(dset)): torch.cuda.empty_cache() img2 = dset.imgs[j].to(device) fname2 = Path(image_paths[j]).stem mask2 = masks_list[j] mask2 = torch.from_numpy(np.array(mask2)>0).to(device) fname = matches_dir / f'{fname1}_{fname2}.npy' d = np.load(fname, allow_pickle=True).item() kp1 = d['kp1'] kp1_idx = d['kp1_idx'] kp2 = d['kp2'] kp2_idx = d['kp2_idx'] ranks_attn = d['ranks_attn'] # Run kmeans to get a few well distributed keypoints # if mls_num is not None: # use_indices = kmeans_correspondences( # feat1[kp1_idx], feat2[kp2_idx], ranks_attn, mls_num) # use_indices = use_indices.astype(np.int32) # else: use_indices = np.arange(len(kp1_idx)) # Save sparse correspondences (from kmeans) sparse_corrs = draw_kps( img1, img2, kp1[use_indices], kp2[use_indices], lines=False) fname = flow_dir / f'sparse_{fname1}_{fname2}.jpg' sparse_corrs.save(fname) # Reverse flow from correspondences (MLS) flow21 = mls_rigid_deformation( torch.from_numpy(kp1[use_indices]).to(device), torch.from_numpy(kp2[use_indices]).to(device), alpha=mls_alpha, resolution=img_size) flow21 = flow21.permute(1, 2, 0) flow12 = mls_rigid_deformation( torch.from_numpy(kp2[use_indices]).to(device), torch.from_numpy(kp1[use_indices]).to(device), alpha=mls_alpha, resolution=img_size) flow12 = flow12.permute(1, 2, 0) fname = flow_dir / 
f'{fname1}_{fname2}.npy' np.save(fname, flow12.cpu().numpy()) fname = flow_dir / f'{fname2}_{fname1}.npy' np.save(fname, flow21.cpu().numpy()) # Dense correspondence (1 to 2) from MLS
@torch.no_grad() def extract_features_and_saliency_maps( extractor, img_size, layer, facet, bin, transform, dset, out_dir, device): images_list = [] descriptors_list = [] saliency_maps_list = [] num_patches = extractor.get_num_patches(img_size, img_size)[0] image_paths = dset.files # Extract features and saliency maps print("Extracting features and saliency maps") feat_dir = out_dir / f'feat_l{layer}_f{facet}_b{bin:1d}' feat_dir.mkdir(exist_ok=True, parents=True) saliency_map_dir = out_dir / 'saliency' saliency_map_dir.mkdir(exist_ok=True, parents=True) for i in tqdm(range(len(dset))): img = dset.imgs[i].to(device) img_unnorm = img * 0.5 + 0.5 img_np = ((img_unnorm) * 255).permute(1, 2, 0).cpu().numpy() images_list.append(img_np.astype(np.uint8)) img_norm = transform(img_unnorm).unsqueeze(0) fname = Path(image_paths[i]).stem # Extract and save features feat_fname = feat_dir / f'{fname}.npy' if feat_fname.is_file(): feat = np.load(feat_fname) else: feat = extractor.extract_descriptors(img_norm, layer, facet, bin) feat = feat.cpu().squeeze().numpy() np.save(feat_fname, feat) descriptors_list.append(feat) sal_fname = saliency_map_dir / f'{fname}.png' if sal_fname.is_file(): saliency_map = Image.open(sal_fname).convert('L') saliency_map = np.array(saliency_map).astype(np.float32) / 255 saliency_map = saliency_map.reshape(-1) else: saliency_map = extractor.extract_saliency_maps(img_norm) saliency_map = saliency_map.squeeze().cpu().numpy() saliency_map = saliency_map.reshape(num_patches, num_patches) saliency_map = Image.fromarray((saliency_map * 255).astype(np.uint8)) saliency_map.save(sal_fname) saliency_map = np.array(saliency_map).astype(np.float32) / 255 saliency_map = saliency_map.reshape(-1) saliency_maps_list.append(saliency_map) return images_list, descriptors_list, saliency_maps_list def save_cosegmentations(extractor, dset, out_dir, img_size, transform, device, layer=11, facet='key', bin=False, thresh=0.065, elbow=0.975, votes_percentage=75, sample_interval=100): print("Running co-segmentation on collection of images") images_list, descriptors_list, saliency_maps_list = \ extract_features_and_saliency_maps( extractor, img_size, layer, facet, bin, transform, dset, out_dir, device) image_paths = dset.files # Run cosegmentation print("Computing masks") segmentation_masks = coseg_from_feat( images_list, descriptors_list, saliency_maps_list, img_size, extractor.get_num_patches(img_size, img_size)[0], elbow, thresh, votes_percentage, sample_interval) masks_dir = out_dir / 'masks_coseg' masks_dir.mkdir(exist_ok=True, parents=True) for i in tqdm(range(len(dset))): fname = Path(image_paths[i]).stem mask_fname = masks_dir / f'{fname}.png' segmentation_masks[i].save(mask_fname) @torch.no_grad() def save_bg(model_path, dset, out_dir, in_size, device): net=ISNetDIS() model_path = Path(model_path) if not model_path.exists(): model_id = "1nV57qKuy--d5u1yvkng9aXW1KS4sOpOi" download_file_from_google_drive(model_id, model_path.parent, filename=model_path.name) net.load_state_dict(torch.load(model_path, map_location="cpu")) net = net.to(device) net.eval() image_paths = dset.files out_dir = out_dir / 'masks' out_dir.mkdir(exist_ok=True, parents=True) print("Computing masks") for i in tqdm(range(len(dset))): img = dset.imgs[i].to(device) # From [-1, 1] to [-0.5, 0.5] img = img / 2.0 img = F.upsample(img.unsqueeze(0), in_size, mode='bilinear') mask = net(img) mask = torch.squeeze(F.upsample(mask[0][0], dset.img_size, mode='bilinear'), 0) ma = torch.max(mask) mi = torch.min(mask) mask = (mask-mi)/(ma-mi) 
fname = Path(image_paths[i]).stem mask_fname = out_dir / f'{fname}.png' mask = (mask.squeeze() * 255).cpu().numpy() Image.fromarray(mask.astype(np.uint8)).save(mask_fname) @torch.no_grad() def save_correspondences(extractor, dset, out_dir, img_size, transform, device, layer=9, facet='key', bin=False): print("Saving NBB for all pairs of images") _, descriptors_list, saliency_maps_list = \ extract_features_and_saliency_maps( extractor, img_size, layer, facet, bin, transform, dset, out_dir, device) image_paths = dset.files num_patches = extractor.get_num_patches(img_size, img_size)[0] masks_dir = out_dir / 'masks' masks_list = [] for i in tqdm(range(len(dset))): fname = Path(image_paths[i]).stem mask_fname = masks_dir / f'{fname}.png' mask = Image.open(mask_fname).convert('L') masks_list.append(mask) matches_dir = out_dir / f'nbb' matches_dir.mkdir(exist_ok=True, parents=True) descriptors_list = torch.stack([ torch.from_numpy(descriptor).to(device) for descriptor in descriptors_list ]) for i in tqdm(range(len(dset)-1)): img1 = dset.imgs[i].to(device) fname1 = Path(image_paths[i]).stem feat1 = descriptors_list[i] mask1 = masks_list[i] saliency_map1 = saliency_maps_list[i] for j in range(i+1, len(dset)): img2 = dset.imgs[j].to(device) fname2 = Path(image_paths[j]).stem feat2 = descriptors_list[j] mask2 = masks_list[j] saliency_map2 = saliency_maps_list[j] fname = matches_dir / f'{fname1}_{fname2}.npy' if fname.exists(): continue pt1, pt2, pt1_idx, pt2_idx, ranks_sal, ranks_sim = corrs_from_feat( feat1, feat2, saliency_map1, saliency_map2, num_patches, extractor.stride[0], extractor.p, device, mask1, mask2) # Save the output fname = matches_dir / f'{fname1}_{fname2}.npy' d = { 'kp1': pt1.cpu().numpy().astype(np.int32), 'kp2': pt2.cpu().numpy().astype(np.int32), 'kp1_idx': pt1_idx, 'kp2_idx': pt2_idx, 'ranks_attn': ranks_sal, 'ranks_sim': ranks_sim.cpu().numpy(), } np.save(fname, d) # Save sparse correspondences colors = get_dense_colors(pt1, img_size) colors = colors.to(device).unsqueeze(0).expand(2, -1, -1) sparse_corrs = splat_points( torch.stack([img1, img2], dim=0), torch.stack([pt1, pt2]).float().to(device), sigma=2., opacity=1.0, colors=map_minmax(colors, 0, 1, -1, 1)) fname = matches_dir / f'{fname1}_{fname2}.jpg' save_image(sparse_corrs, fname, normalize=True, padding=2, pad_value=1) @torch.no_grad() def save_mls(extractor, dset, out_dir, img_size, transform, device, layer=9, facet='key', bin=False, mls_num=None, mls_alpha=1.): print("Converting NBB to MLS for all pairs of images") _, descriptors_list, saliency_maps_list = \ extract_features_and_saliency_maps( extractor, img_size, layer, facet, bin, transform, dset, out_dir, device) image_paths = dset.files num_patches = extractor.get_num_patches(img_size, img_size)[0] masks_dir = out_dir / 'masks' masks_list = [] for i in tqdm(range(len(dset))): fname = Path(image_paths[i]).stem mask_fname = masks_dir / f'{fname}.png' mask = Image.open(mask_fname).convert('L') masks_list.append(mask) matches_dir = out_dir / f'nbb' matches_dir.mkdir(exist_ok=True, parents=True) descriptors_list = torch.stack([ torch.from_numpy(descriptor).to(device) for descriptor in descriptors_list ]) if mls_num is not None: flow_dir = out_dir / f'mls_num{mls_num}_alpha{mls_alpha}' else: flow_dir = out_dir / f'mls_alpha{mls_alpha}' flow_dir.mkdir(exist_ok=True, parents=True) for i in tqdm(range(len(dset)-1)): img1 = dset.imgs[i].to(device) fname1 = Path(image_paths[i]).stem mask1 = masks_list[i] mask1 = torch.from_numpy(np.array(mask1)>0).to(device) for j in range(i+1, 
len(dset)): torch.cuda.empty_cache() img2 = dset.imgs[j].to(device) fname2 = Path(image_paths[j]).stem mask2 = masks_list[j] mask2 = torch.from_numpy(np.array(mask2)>0).to(device) fname = matches_dir / f'{fname1}_{fname2}.npy' d = np.load(fname, allow_pickle=True).item() kp1 = d['kp1'] kp1_idx = d['kp1_idx'] kp2 = d['kp2'] kp2_idx = d['kp2_idx'] ranks_attn = d['ranks_attn'] # Run kmeans to get a few well distributed keypoints # if mls_num is not None: # use_indices = kmeans_correspondences( # feat1[kp1_idx], feat2[kp2_idx], ranks_attn, mls_num) # use_indices = use_indices.astype(np.int32) # else: use_indices = np.arange(len(kp1_idx)) # Save sparse correspondences (from kmeans) sparse_corrs = draw_kps( img1, img2, kp1[use_indices], kp2[use_indices], lines=False) fname = flow_dir / f'sparse_{fname1}_{fname2}.jpg' sparse_corrs.save(fname) # Reverse flow from correspondences (MLS) flow21 = mls_rigid_deformation( torch.from_numpy(kp1[use_indices]).to(device), torch.from_numpy(kp2[use_indices]).to(device), alpha=mls_alpha, resolution=img_size) flow21 = flow21.permute(1, 2, 0) flow12 = mls_rigid_deformation( torch.from_numpy(kp2[use_indices]).to(device), torch.from_numpy(kp1[use_indices]).to(device), alpha=mls_alpha, resolution=img_size) flow12 = flow12.permute(1, 2, 0) fname = flow_dir / f'{fname1}_{fname2}.npy' np.save(fname, flow12.cpu().numpy()) fname = flow_dir / f'{fname2}_{fname1}.npy' np.save(fname, flow21.cpu().numpy()) # Dense correspondence (1 to 2) from MLS
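save_mls above writes one dense flow field per ordered image pair ('{fname1}_{fname2}.npy', shape H x W x 2), and the code is cropped exactly at the "Dense correspondence (1 to 2) from MLS" comment. The sketch below shows one way such a field can be consumed; it assumes, purely for illustration, that flow12[y, x] stores the absolute (x, y) position in image 2 that pixel (x, y) of image 1 corresponds to, which is not something the cropped code itself states.

import torch
import torch.nn.functional as F

def warp_img2_to_img1(img2: torch.Tensor, flow12: torch.Tensor) -> torch.Tensor:
    """img2: (C, H, W); flow12: (H, W, 2) of absolute (x, y) target coordinates.
    Resamples image 2 at the locations the flow points to, giving an image
    aligned with image 1. The coordinate convention is an assumption."""
    H, W = flow12.shape[:2]
    grid = flow12.clone().float()
    grid[..., 0] = 2.0 * grid[..., 0] / (W - 1) - 1.0   # x to [-1, 1] for grid_sample
    grid[..., 1] = 2.0 * grid[..., 1] / (H - 1) - 1.0   # y to [-1, 1]
    return F.grid_sample(img2[None], grid[None], align_corners=True)[0]

if __name__ == "__main__":
    img2 = torch.rand(3, 256, 256)
    yy, xx = torch.meshgrid(torch.arange(256.0), torch.arange(256.0), indexing="ij")
    identity = torch.stack([xx, yy], dim=-1)            # every pixel maps to itself
    print(torch.allclose(warp_img2_to_img1(img2, identity), img2, atol=1e-5))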
pt1_fg, pt1_fg_alpha, colors = load_fg_points(
7
2023-11-14 16:43:16+00:00
24k
tyang816/ProtSSN
src/data.py
[ { "identifier": "CathDataset", "path": "src/dataset/cath_dataset.py", "snippet": "class CathDataset(InMemoryDataset):\n r\"\"\"\n Args:\n root (string): Root directory where the dataset should be saved.\n name (string): The name of the dataset.\n raw_dir (string, optional): Root directory where the\n original dataset stored(default: :obj:`None`)\n\n num_residue_type (int, optional): The number of amino acid types.\n (default: obj:'20')\n micro_radius (int, optional): The radius of micro-environment\n centered on the mask node. (default: obj:'20')\n c_alpha_max_neighbors (int, optional): The number of maximum\n connected nodes. (default: obj:'10')\n cutoff (int, optional): The maximum connected nodes distance\n (default: obj:'30')\n seq_dist_cut (int, optional): one-hot encoding the sequence distance\n edge attribute\n (default: obj:)\n [0.25,0.5,0.75,0.9,0.95,0.98,0.99]\n [ 2. 3. 13. 63. 127. 247. 347.]\n num_val (int, optional): The number of validation samples in case of \"random\" split. (default: 500)\n num_test (int, optional): The number of test samples in case of \"random\" split. (default: 1000)\n\n # use_localdatastet (bool) (bool,optional): If :obj:'True', online dataset\n # will be downloaded. If not, local pdb files will be used\n # (default: obj:'True')\n\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n pre_filter (callable, optional): A function that takes in an\n :obj:`torch_geometric.data.Data` object and returns a boolean\n value, indicating whether the data object should be included in the\n final dataset. 
(default: :obj:`None`)\n \"\"\"\n\n splits = ['train', 'val', 'test']\n allowable_features = {\n 'possible_atomic_num_list': list(range(1, 119)) + ['misc'],\n 'possible_chirality_list': [\n 'CHI_UNSPECIFIED',\n 'CHI_TETRAHEDRAL_CW',\n 'CHI_TETRAHEDRAL_CCW',\n 'CHI_OTHER'\n ],\n 'possible_degree_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'misc'],\n 'possible_numring_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],\n 'possible_implicit_valence_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],\n 'possible_formal_charge_list': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 'misc'],\n 'possible_numH_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],\n 'possible_number_radical_e_list': [0, 1, 2, 3, 4, 'misc'],\n 'possible_hybridization_list': [\n 'SP', 'SP2', 'SP3', 'SP3D', 'SP3D2', 'misc'\n ],\n 'possible_is_aromatic_list': [False, True],\n 'possible_is_in_ring3_list': [False, True],\n 'possible_is_in_ring4_list': [False, True],\n 'possible_is_in_ring5_list': [False, True],\n 'possible_is_in_ring6_list': [False, True],\n 'possible_is_in_ring7_list': [False, True],\n 'possible_is_in_ring8_list': [False, True],\n 'possible_amino_acids': ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS',\n 'MET',\n 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL', 'HIP', 'HIE', 'TPO', 'HID', 'LEV',\n 'MEU',\n 'PTR', 'GLV', 'CYT', 'SEP', 'HIZ', 'CYM', 'GLM', 'ASQ', 'TYS', 'CYX', 'GLZ', 'misc'],\n 'possible_atom_type_2': ['C*', 'CA', 'CB', 'CD', 'CE', 'CG', 'CH', 'CZ', 'N*', 'ND', 'NE', 'NH', 'NZ', 'O*',\n 'OD',\n 'OE', 'OG', 'OH', 'OX', 'S*', 'SD', 'SG', 'misc'],\n 'possible_atom_type_3': ['C', 'CA', 'CB', 'CD', 'CD1', 'CD2', 'CE', 'CE1', 'CE2', 'CE3', 'CG', 'CG1', 'CG2',\n 'CH2',\n 'CZ', 'CZ2', 'CZ3', 'N', 'ND1', 'ND2', 'NE', 'NE1', 'NE2', 'NH1', 'NH2', 'NZ', 'O',\n 'OD1',\n 'OD2', 'OE1', 'OE2', 'OG', 'OG1', 'OH', 'OXT', 'SD', 'SG', 'misc'],\n }\n\n def __init__(self, root: str,\n split: str = 'train',\n num_residue_type: int = 20,\n micro_radius: int = 20,\n c_alpha_max_neighbors: int = 10,\n cutoff: int = 30,\n seq_dist_cut: int = 64,\n use_micro: bool = False,\n use_angle: bool = False,\n use_omega: bool = False,\n transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None,\n pre_filter: Optional[Callable] = None,\n divide_num: int = 1,\n divide_idx: int = 0,\n set_length: int = 500,\n num_val: int = 10,\n is_normalize: bool = True,\n normalize_file: str = None,\n p: float = 0.5,\n use_sasa: bool =False,\n use_bfactor: bool = False,\n use_dihedral: bool = False,\n use_coordinate: bool = False,\n use_denoise: bool = False,\n noise_type: str = 'wild',\n temperature = 1.0\n ):\n self.p=p\n self.use_sasa=use_sasa\n self.use_bfactor=use_bfactor\n self.use_dihedral=use_dihedral\n self.use_coordinate=use_coordinate\n self.use_denoise=use_denoise\n self.noise_type = noise_type\n self.temperature = temperature\n \n self.split = split\n assert self.split in self.splits\n\n self.num_residue_type = num_residue_type\n self.micro_radius = micro_radius\n self.c_alpha_max_neighbors = c_alpha_max_neighbors\n self.seq_dist_cut = seq_dist_cut\n self.use_micro = use_micro\n self.use_angle = use_angle\n self.use_omega = use_omega\n self.cutoff = cutoff\n\n self.num_val = num_val\n self.divide_num = divide_num\n self.divide_idx = divide_idx\n self.set_length = set_length\n\n self.is_normalize = is_normalize\n self.normalize_file = normalize_file\n\n self.wrong_proteins = ['1kp0A01', '2atcA02']\n\n self.sr = ShrakeRupley(probe_radius=1.4, # in A. 
Default is 1.40 roughly the radius of a water molecule.\n n_points=100) # resolution of the surface of each atom. Default is 100. A higher number of points results in more precise measurements, but slows down the calculation.\n self.periodic_table = GetPeriodicTable()\n self.biopython_parser = PDBParser()\n\n super().__init__(root, transform, pre_transform, pre_filter)\n self.dataset = torch.load(self.processed_paths[self.splits.index(self.split)])\n # self.data, self.slices = torch.load(\n # self.processed_paths[self.splits.index(self.split)])\n # self.nums_amino_cum = self.slices['x']\n\n @property\n def raw_file_names(self) -> str:\n raw_file_names = os.path.join('data', 'cath', \"dompdb\")\n if not os.path.exists(raw_file_names):\n os.mkdir(raw_file_names)\n return raw_file_names\n\n @property\n def raw_dir(self) -> str:\n if not os.path.exists(self.root):\n os.mkdir(self.root)\n raw_dir = os.path.join(self.root, 'raw')\n if not os.path.exists(raw_dir):\n os.mkdir(raw_dir)\n return raw_dir\n\n @property\n def saved_graph_dir(self) -> str:\n dir_root = os.path.join(self.root)\n if not os.path.exists(dir_root):\n os.mkdir(dir_root)\n dir_name = os.path.join(dir_root, 'graph_seq')\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n if not self.set_length:\n self.set_length = len(os.listdir(dir_name))\n return dir_name\n\n @property\n def saved_amino_cum(self) -> str:\n amino_cum_name = os.path.join(\n self.root, 'amino_cum.pt')\n return amino_cum_name\n\n @property\n def processed_dir(self) -> str:\n return os.path.join(self.root, 'processed_seq')\n\n @property\n def processed_file_names(self) -> str:\n return ['train.pt', 'val.pt']\n\n\n def write_info(self):\n written_filename = os.path.join(self.root, 'wrong_protein_names.txt')\n file = open(written_filename, \"w+\")\n for protein_name in self.wrong_proteins:\n file.writelines(protein_name + '\\n')\n file.close()\n\n def process(self):\n #generate graph data and save in graph dir\n self.generate_protein_graph()\n # self.write_info()\n\n filenames = os.listdir(self.saved_graph_dir)\n protein_length = len(filenames)\n if self.set_length:\n protein_length = min(protein_length, self.set_length)\n\n if not self.normalize_file:\n self.normalize_file = get_stat(self.saved_graph_dir)\n\n random.shuffle(filenames)\n train_list = [f for f in filenames if \"_\" in f or \"-\" in f]\n filenames = [f for f in filenames if \"_\" not in f or \"-\" not in f]\n train_list.extend(filenames[:-self.num_val])\n filenames_list = [train_list, filenames[-self.num_val:]]\n \n for k in range(2):####split train,val,test\n data_list = []\n\n ###move special name to test set\n special_name_list = [\"p53-dimer.pdb.pt\"]\n for special_name in special_name_list:\n if special_name in filenames_list[0]:\n filenames_list[0].remove(special_name)\n filenames_list[1].append(special_name)\n for i in tqdm(range(len(filenames_list[k]))):\n file = filenames_list[k][i]\n try:\n graph1 = torch.load(os.path.join(self.saved_graph_dir, file))##load processed graph data torch pt file\n except:\n print(file)\n continue\n del graph1['distances']\n del graph1['edge_dist']\n del graph1['mu_r_norm']\n del graph1['seq']\n data_list.append(graph1)\n if self.is_normalize:\n normalize_transform = NormalizeProtein(filename=self.normalize_file)\n data_list = [d for d in data_list if normalize_transform(d)]\n if self.pre_filter is not None:\n data_list = [d for d in data_list if self.pre_filter(d)]\n if self.pre_transform is not None:\n data_list = [self.pre_transform(d) for d in 
data_list]\n\n torch.save(data_list, self.processed_paths[k])\n\n def generate_protein_graph(self):\n names = os.listdir(self.raw_file_names)\n print(names)\n names.sort()\n n = int(np.ceil(len(names) / self.divide_num))\n names = names[n * self.divide_idx:min(len(names), n * (self.divide_idx + 1))]\n for idx, name in enumerate(tqdm(names)):\n saved_graph_filename = os.path.join(self.saved_graph_dir, name + '.pt')\n if os.path.exists(saved_graph_filename):\n continue\n protein_filename = os.path.join(self.raw_file_names, name)\n if (name in self.wrong_proteins) or (not protein_filename):\n continue\n try:\n rec, rec_coords, c_alpha_coords, n_coords, c_coords,seq = self.get_receptor_inference(protein_filename)\n except:\n continue\n if rec !=False:\n if len(seq)>len(c_alpha_coords):\n del seq[-(len(seq)-len(c_alpha_coords)):]\n #meet \"dna\" data will remove the file and rec will be false\n # print(self.c_alpha_max_neighbors)\n rec_graph = self.get_calpha_graph(rec, c_alpha_coords, n_coords, c_coords, rec_coords,seq)\n if not rec_graph:\n self.wrong_proteins.append(name)\n continue\n torch.save(rec_graph, saved_graph_filename)\n\n def rec_residue_featurizer(self, rec, chain_id, one_hot=True, add_feature=None):\n count = 0\n flag_sasa=1\n try:\n self.sr.compute(rec, level=\"R\")\n except:\n flag_sasa=0\n for i, chain in enumerate(rec.get_chains()):\n if i != chain_id:\n continue\n num_res = len(list(chain.get_residues()))#len([_ for _ in rec.get_residues()])\n num_feature = 2\n if add_feature.any():\n num_feature += add_feature.shape[1]\n res_feature = torch.zeros(num_res, self.num_residue_type + num_feature)\n for i, residue in enumerate(chain.get_residues()):\n if flag_sasa==0:\n residue.sasa=0\n sasa = residue.sasa\n for atom in residue:\n if atom.name == 'CA':\n bfactor = atom.bfactor\n assert not np.isinf(bfactor)\n assert not np.isnan(bfactor)\n assert not np.isinf(sasa)\n assert not np.isnan(sasa)\n\n residx = safe_index(\n self.allowable_features['possible_amino_acids'], residue.get_resname())\n res_feat_1 = one_hot_res(\n residx, num_residue_type=self.num_residue_type) if one_hot else [residx]\n if not res_feat_1:\n return False\n res_feat_1.append(sasa)\n res_feat_1.append(bfactor)\n if num_feature > 2:\n res_feat_1.extend(list(add_feature[count, :]))\n res_feature[count, :] = torch.tensor(res_feat_1, dtype=torch.float32)\n count += 1\n # print(\"numnodes:\", num_res, count,len(list(chain.get_residues())))\n for k in range(self.num_residue_type, self.num_residue_type + 2):\n mean = res_feature[:, k].mean()\n std = res_feature[:, k].std()\n res_feature[:, k] = (res_feature[:, k] -mean) / (std + 0.000000001)\n return res_feature\n\n def get_node_features(self, n_coords, c_coords, c_alpha_coords, coord_mask, with_coord_mask=True, use_angle=False,\n use_omega=False):\n num_res = n_coords.shape[0]\n if use_omega:\n num_angle_type = 3\n angles = np.zeros((num_res, num_angle_type))\n for i in range(num_res - 1):\n # These angles are called φ (phi) which involves the backbone atoms C-N-Cα-C\n angles[i, 0] = dihedral(\n c_coords[i], n_coords[i], c_alpha_coords[i], n_coords[i + 1])\n # psi involves the backbone atoms N-Cα-C-N.\n angles[i, 1] = dihedral(\n n_coords[i], c_alpha_coords[i], c_coords[i], n_coords[i + 1])\n angles[i, 2] = dihedral(\n c_alpha_coords[i], c_coords[i], n_coords[i + 1], c_alpha_coords[i + 1])\n else:\n num_angle_type = 2\n angles = np.zeros((num_res, num_angle_type))\n for i in range(num_res - 1):\n # These angles are called φ (phi) which involves the backbone atoms 
C-N-Cα-C\n angles[i, 0] = dihedral(\n c_coords[i], n_coords[i], c_alpha_coords[i], n_coords[i + 1])\n # psi involves the backbone atoms N-Cα-C-N.\n angles[i, 1] = dihedral(\n n_coords[i], c_alpha_coords[i], c_coords[i], n_coords[i + 1])\n if use_angle:\n node_scalar_features = angles\n else:\n node_scalar_features = np.zeros((num_res, num_angle_type * 2))\n for i in range(num_angle_type):\n node_scalar_features[:, 2 * i] = np.sin(angles[:, i])\n node_scalar_features[:, 2 * i + 1] = np.cos(angles[:, i])\n\n if with_coord_mask:\n node_scalar_features = torch.cat([\n node_scalar_features,\n coord_mask.float().unsqueeze(-1)\n ], dim=-1)\n node_vector_features = None\n return node_scalar_features, node_vector_features\n\n def get_calpha_graph(self, rec, c_alpha_coords, n_coords, c_coords, coords, seq):\n chain_id = 0\n scalar_feature, vec_feature = self.get_node_features(n_coords, c_coords, c_alpha_coords, coord_mask=None, with_coord_mask=False, use_angle=self.use_angle, use_omega=self.use_omega)\n # Extract 3D coordinates and n_i,u_i,v_i\n # vectors of representative residues ################\n residue_representatives_loc_list = []\n n_i_list = []\n u_i_list = []\n v_i_list = []\n for i, chain in enumerate(rec.get_chains()):\n if i != chain_id:\n continue\n for i, residue in enumerate(chain.get_residues()):\n n_coord = n_coords[i]\n c_alpha_coord = c_alpha_coords[i]\n c_coord = c_coords[i]\n u_i = (n_coord - c_alpha_coord) / \\\n np.linalg.norm(n_coord - c_alpha_coord)\n t_i = (c_coord - c_alpha_coord) / \\\n np.linalg.norm(c_coord - c_alpha_coord)\n n_i = np.cross(u_i, t_i) / \\\n np.linalg.norm(np.cross(u_i, t_i)) # main chain\n v_i = np.cross(n_i, u_i)\n assert (math.fabs(\n np.linalg.norm(v_i) - 1.) < 1e-5), \"protein utils protein_to_graph_dips, v_i norm larger than 1\"\n n_i_list.append(n_i)\n u_i_list.append(u_i)\n v_i_list.append(v_i)\n residue_representatives_loc_list.append(c_alpha_coord)\n\n residue_representatives_loc_feat = np.stack(residue_representatives_loc_list, axis=0) # (N_res, 3)\n n_i_feat = np.stack(n_i_list, axis=0)\n u_i_feat = np.stack(u_i_list, axis=0)\n v_i_feat = np.stack(v_i_list, axis=0)\n num_residues = len(c_alpha_coords)\n if num_residues <= 1:\n raise ValueError(f\"rec contains only 1 residue!\")\n ################### Build the k-NN graph ##############################\n assert num_residues == residue_representatives_loc_feat.shape[0]\n assert residue_representatives_loc_feat.shape[1] == 3\n distances = spa.distance.cdist(c_alpha_coords, c_alpha_coords)\n\n src_list = []\n dst_list = []\n dist_list = []\n mean_norm_list = []\n for i in range(num_residues):\n dst = list(np.where(distances[i, :] < self.cutoff)[0])\n dst.remove(i)\n if self.c_alpha_max_neighbors != None and len(dst) > self.c_alpha_max_neighbors:\n dst = list(np.argsort(distances[i, :]))[\n 1: self.c_alpha_max_neighbors + 1]\n if len(dst) == 0:\n # choose second because first is i itself\n dst = list(np.argsort(distances[i, :]))[1:2]\n log(\n f'The c_alpha_cutoff {self.cutoff} was too small for one c_alpha such that it had no neighbors. 
So we connected it to the closest other c_alpha')\n assert i not in dst\n src = [i] * len(dst)\n src_list.extend(src)\n dst_list.extend(dst)\n valid_dist = list(distances[i, dst])\n dist_list.extend(valid_dist)\n valid_dist_np = distances[i, dst]\n sigma = np.array([1., 2., 5., 10., 30.]).reshape((-1, 1))\n weights = softmax(- valid_dist_np.reshape((1, -1))** 2 / sigma, axis=1) # (sigma_num, neigh_num)\n # print(weights) why weight??\n assert weights[0].sum() > 1 - 1e-2 and weights[0].sum() < 1.01\n diff_vecs = residue_representatives_loc_feat[src, :] - residue_representatives_loc_feat[dst, :] # (neigh_num, 3)\n mean_vec = weights.dot(diff_vecs) # (sigma_num, 3)\n denominator = weights.dot(np.linalg.norm(diff_vecs, axis=1)) # (sigma_num,)\n mean_vec_ratio_norm = np.linalg.norm(mean_vec, axis=1) / denominator # (sigma_num,)\n mean_norm_list.append(mean_vec_ratio_norm)\n assert len(src_list) == len(dst_list)\n assert len(dist_list) == len(dst_list)\n residue_representatives_loc_feat = torch.from_numpy(residue_representatives_loc_feat.astype(np.float32))\n x = self.rec_residue_featurizer(rec, chain_id, one_hot=True, add_feature=scalar_feature)\n if isinstance(x, bool) and (not x):\n return False\n ######key part to generate graph!!!!!main\n graph = Data(\n x=x,## 26 feature 20+sasa+b factor+ two face angle\n pos=residue_representatives_loc_feat,\n edge_attr=self.get_edge_features(src_list, dst_list, dist_list, divisor=4), ##edge features\n edge_index=torch.tensor([src_list, dst_list]),\n edge_dist=torch.tensor(dist_list),\n distances=torch.tensor(distances),\n mu_r_norm=torch.from_numpy(np.array(mean_norm_list).astype(np.float32)),\n seq = seq) ##about density capture\n # Loop over all edges of the graph and build the various p_ij, q_ij, k_ij, t_ij pairs\n edge_feat_ori_list = []\n for i in range(len(dist_list)):\n src = src_list[i]\n dst = dst_list[i]\n # place n_i, u_i, v_i as lines in a 3x3 basis matrix\n basis_matrix = np.stack(\n (n_i_feat[dst, :], u_i_feat[dst, :], v_i_feat[dst, :]), axis=0)\n p_ij = np.matmul(basis_matrix,residue_representatives_loc_feat[src, :] - residue_representatives_loc_feat[dst, :])\n q_ij = np.matmul(basis_matrix, n_i_feat[src, :]) # shape (3,)\n k_ij = np.matmul(basis_matrix, u_i_feat[src, :])\n t_ij = np.matmul(basis_matrix, v_i_feat[src, :])\n s_ij = np.concatenate((p_ij, q_ij, k_ij, t_ij), axis=0) # shape (12,)\n edge_feat_ori_list.append(s_ij)\n\n edge_feat_ori_feat = np.stack(edge_feat_ori_list, axis=0) # shape (num_edges, 4, 3)\n edge_feat_ori_feat = torch.from_numpy(edge_feat_ori_feat.astype(np.float32))\n graph.edge_attr = torch.cat([graph.edge_attr, edge_feat_ori_feat], axis=1) # (num_edges, 17)\n # graph = self.remove_node(graph, graph.x.shape[0]-1)###remove the last node, can not calculate the two face angle\n # self.get_calpha_graph_single(graph, 6)\n return graph\n\n def remove_node(self, graph, node_idx):\n new_graph = Data.clone(graph)\n # delete node\n new_graph.x = torch.cat(\n [new_graph.x[:node_idx, :], new_graph.x[node_idx + 1:, :]])\n new_graph.pos = torch.cat(\n [new_graph.pos[:node_idx, :], new_graph.pos[node_idx + 1:, :]])\n new_graph.mu_r_norm = torch.cat(\n [new_graph.mu_r_norm[:node_idx, :], new_graph.mu_r_norm[node_idx + 1:, :]])\n\n # delete edge\n keep_edge = (torch.sum(new_graph.edge_index == node_idx, dim=0) == 0)\n new_graph.edge_index = new_graph.edge_index[:, keep_edge]\n new_graph.edge_attr = new_graph.edge_attr[keep_edge, :]\n return new_graph\n\n def get_edge_features(self, src_list, dst_list, dist_list, divisor=4):\n 
seq_edge = torch.absolute(torch.tensor(\n src_list) - torch.tensor(dst_list)).reshape(-1, 1)\n seq_edge = torch.where(seq_edge > self.seq_dist_cut,\n self.seq_dist_cut, seq_edge)\n seq_edge = F.one_hot(\n seq_edge, num_classes=self.seq_dist_cut + 1).reshape((-1, self.seq_dist_cut + 1))\n contact_sig = torch.where(torch.tensor(\n dist_list) <= 8, 1, 0).reshape(-1, 1)\n # avg distance = 7. So divisor = (4/7)*7 = 4\n dist_fea = self.distance_featurizer(dist_list, divisor=divisor)\n return torch.concat([seq_edge, dist_fea, contact_sig], dim=-1)\n\n def get_receptor_inference(self, rec_path):\n chain_id=0\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=PDBConstructionWarning)\n structure = self.biopython_parser.get_structure('random_id', rec_path)\n rec = structure[0]##len(structure)=1\n head = self.biopython_parser.get_header()['head']\n if head.find('dna') > -1:\n return False, False, False, False, False,False\n coords = []\n c_alpha_coords = []\n n_coords = []\n c_coords = []\n valid_chain_ids = []\n lengths = []\n seq = []\n for i, chain in enumerate(rec):\n print(\"chain num\",i,chain_id,chain)\n if i != chain_id:##select chain A:i=0 or B:i=1\n continue\n chain_coords = [] # num_residues, num_atoms, 3\n chain_c_alpha_coords = []\n chain_n_coords = []\n chain_c_coords = []\n count = 0\n invalid_res_ids = []\n for res_idx, residue in enumerate(chain):\n if residue.get_resname() == 'HOH':\n invalid_res_ids.append(residue.get_id())\n continue\n residue_coords = []\n c_alpha, n, c = None, None, None\n for atom in residue:\n if atom.name == 'CA':\n c_alpha = list(atom.get_vector())\n seq.append(str(residue).split(\" \")[1])\n if atom.name == 'N':\n n = list(atom.get_vector())\n if atom.name == 'C':\n c = list(atom.get_vector())\n residue_coords.append(list(atom.get_vector()))\n # only append residue if it is an amino acid and not some weired molecule that is part of the complex\n if c_alpha != None and n != None and c != None:\n chain_c_alpha_coords.append(c_alpha)\n chain_n_coords.append(n)\n chain_c_coords.append(c)\n chain_coords.append(np.array(residue_coords))\n count += 1\n else:\n invalid_res_ids.append(residue.get_id())\n for res_id in invalid_res_ids:\n chain.detach_child(res_id)\n lengths.append(count)\n coords.append(chain_coords)\n c_alpha_coords.append(np.array(chain_c_alpha_coords))\n n_coords.append(np.array(chain_n_coords))\n c_coords.append(np.array(chain_c_coords))\n if len(chain_coords) > 0:\n valid_chain_ids.append(chain.get_id())\n valid_coords = []\n valid_c_alpha_coords = []\n valid_n_coords = []\n valid_c_coords = []\n valid_lengths = []\n invalid_chain_ids = []\n for i, chain in enumerate(rec):\n # print(\"chain:\",i,chain, len(valid_coords), len(valid_chain_ids), len(coords), coords[0][0].shape, len(coords[0]))\n if i != chain_id:\n continue\n if chain.get_id() in valid_chain_ids:\n valid_coords.append(coords[0])\n valid_c_alpha_coords.append(c_alpha_coords[0])\n valid_n_coords.append(n_coords[0])\n valid_c_coords.append(c_coords[0])\n valid_lengths.append(lengths[0])\n else:\n invalid_chain_ids.append(chain.get_id())\n # list with n_residues arrays: [n_atoms, 3]\n coords = [item for sublist in valid_coords for item in sublist]\n if len(valid_c_alpha_coords) == 0:\n return False, False, False, False, False,False\n c_alpha_coords = np.concatenate(valid_c_alpha_coords, axis=0) # [n_residues, 3]\n n_coords = np.concatenate(valid_n_coords, axis=0) # [n_residues, 3]\n c_coords = np.concatenate(valid_c_coords, axis=0) # [n_residues, 3]\n\n for 
invalid_id in invalid_chain_ids:\n rec.detach_child(invalid_id)\n\n assert len(c_alpha_coords) == len(n_coords)\n assert len(c_alpha_coords) == len(c_coords)\n assert sum(valid_lengths) == len(c_alpha_coords)\n return rec, coords, c_alpha_coords, n_coords, c_coords,seq\n\n def len(self):\n return len(self.dataset)\n\n def get_statistic_info(self):\n node_num = torch.zeros(self.length_total)\n edge_num = torch.zeros(self.length_total)\n for i in tqdm(range(self.length_total)):\n graph = self.get(i)\n node_num[i] = graph.x.shape[0]\n edge_num[i] = graph.edge_index.shape[1]\n num_node_min = torch.min(node_num)\n num_node_max = torch.max(node_num)\n num_node_avg = torch.mean(node_num)\n num_edge_min = torch.min(edge_num)\n num_edge_max = torch.max(edge_num)\n num_edge_avg = torch.mean(edge_num)\n print(f'Graph Num: {self.length_total}')\n print(\n f'Min Nodes: {num_node_min:.2f} Max Nodes: {num_node_max:.2f}. Avg Nodes: {num_node_avg:.2f}')\n print(\n f'Min Edges: {num_edge_min:.2f} Max Edges: {num_edge_max:.2f}. Avg Edges: {num_edge_avg:.2f}')\n\n def _get_noise(self, token_len: int, prob: List=[]):\n prob = prob if prob else [0.08, 0.05, 0.04, 0.06, 0.01, 0.04, 0.07, 0.07, 0.02, 0.06, 0.1, 0.06,\n 0.02, 0.04, 0.04, 0.06, 0.05, 0.01, 0.03, 0.07]\n multant_pos = ((torch.rand(token_len) <= self.p)).nonzero().flatten()\n if len(multant_pos) == 0:\n return None, None\n multant_trg = torch.multinomial(torch.tensor(prob), len(multant_pos), replacement=True)\n return multant_pos, multant_trg\n \n \n def _token_rep_noise(self, data, multant_pos, multant_trg, rep_noise_type='window_3'):\n num_classes = 20\n multant_rep = data.token_rep.clone()\n for mut_pos, mut_trg in zip(multant_pos, multant_trg):\n mut_trg_ = F.one_hot(mut_trg, num_classes=num_classes)\n if rep_noise_type == 'mean':\n trg_rep = data.token_rep[(data.x[:,:20] == mut_trg_).sum(1) == num_classes].mean(0)\n if torch.isnan(trg_rep).sum() > 0:\n continue\n multant_rep[mut_pos] = trg_rep\n elif \"window\" in rep_noise_type:\n window_size = int(rep_noise_type.split(\"_\")[-1])\n start_pos = mut_pos - math.ceil(window_size/2)\n end_pos = start_pos + window_size\n if end_pos > len(data.token_rep):\n start_pos = mut_pos - window_size\n trg_rep = data.token_rep[start_pos:].mean(0)\n elif start_pos < 0:\n end_pos = window_size\n trg_rep = data.token_rep[:end_pos].mean(0)\n else:\n trg_rep = data.token_rep[start_pos:end_pos].mean(0)\n multant_rep[mut_pos] = trg_rep\n return multant_rep\n\n def get(self, idx):\n # idx_protein = idx\n # idx_x0, idx_x1 = self.slices['x'][idx_protein], self.slices['x'][idx_protein + 1]\n # idx_edge0, idx_edge1 = self.slices['edge_index'][idx_protein], self.slices['edge_index'][idx_protein + 1]\n \n # data = Data(\n # x=self.data.x[idx_x0:idx_x1, :],\n # pos=self.data.pos[idx_x0:idx_x1, :],\n # edge_index=self.data.edge_index[:, idx_edge0:idx_edge1],\n # edge_attr=self.data.edge_attr[idx_edge0:idx_edge1, :],\n # lenth=idx_x1-idx_x0\n # )\n data = self.dataset[idx]\n\n token_len = data.x.shape[0]\n data.y = data.x[:token_len, :self.num_residue_type].argmax(1)\n multant_pos, multant_trg = self._get_noise(token_len=token_len)\n if multant_pos is not None:\n noisey = data.x[:, :20].argmax(dim=1)\n noisey[multant_pos] = multant_trg\n data.x[:,:20] = F.one_hot(noisey, num_classes=20)\n \n return data\n \n\n def find_idx(self, idx_protein, amino_idx):\n idx = (self.distances[idx_protein][:-1, amino_idx]< self.micro_radius).nonzero(as_tuple=True)[0]\n return idx\n \n def get_calpha_graph_single(self, graph, idx_protein, 
amino_idx):\n choosen_amino_idx = self.find_idx(idx_protein, amino_idx)\n keep_edge_index = []\n for edge_idx in range(graph.num_edges):\n edge = graph.edge_index.t()[edge_idx]\n if (edge[0] in choosen_amino_idx) and (edge[1] in choosen_amino_idx):\n keep_edge_index.append(edge_idx)\n graph1 = Data(x=graph.x[choosen_amino_idx, :],\n pos=graph.pos[choosen_amino_idx, :],\n edge_index=graph.edge_index[:, keep_edge_index],\n edge_attr=graph.edge_attr[keep_edge_index, :],\n mu_r_norm=graph.mu_r_norm[choosen_amino_idx, :])\n return graph1\n \n def __repr__(self) -> str:\n return f'{self.__class__.__name__}()'\n \n def distance_featurizer(self, dist_list, divisor) -> torch.Tensor:\n # you want to use a divisor that is close to 4/7 times the average distance that you want to encode\n length_scale_list = [1.5 ** x for x in range(15)]\n center_list = [0. for _ in range(15)]\n num_edge = len(dist_list)\n dist_list = np.array(dist_list)\n transformed_dist = [np.exp(- ((dist_list / divisor) ** 2) / float(length_scale))\n for length_scale, center in zip(length_scale_list, center_list)]\n transformed_dist = np.array(transformed_dist).T\n transformed_dist = transformed_dist.reshape((num_edge, -1))\n return torch.from_numpy(transformed_dist.astype(np.float32))" }, { "identifier": "MutantDataset", "path": "src/dataset/mutant_dataset.py", "snippet": "class MutantDataset(Dataset):\n r\"\"\"\n Args:\n root (string): Root directory where the dataset should be saved.\n name (string): The name of the dataset.\n raw_dir (string, optional): Root directory where the\n original dataset stored(default: :obj:`None`)\n\n num_residue_type (int, optional): The number of amino acid types.\n (default: obj:'20')\n micro_radius (int, optional): The radius of micro-environment\n centered on the mask node. (default: obj:'20')\n c_alpha_max_neighbors (int, optional): The number of maximum\n connected nodes. (default: obj:'10')\n cutoff (int, optional): The maximum connected nodes distance\n (default: obj:'30')\n seq_dist_cut (int, optional): one-hot encoding the sequence distance\n edge attribute\n (default: obj:)\n [0.25,0.5,0.75,0.9,0.95,0.98,0.99]\n [ 2. 3. 13. 63. 127. 247. 347.]\n\n # use_localdatastet (bool) (bool,optional): If :obj:'True', online dataset\n # will be downloaded. If not, local pdb files will be used\n # (default: obj:'True')\n\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n pre_filter (callable, optional): A function that takes in an\n :obj:`torch_geometric.data.Data` object and returns a boolean\n value, indicating whether the data object should be included in the\n final dataset. 
(default: :obj:`None`)\n \"\"\"\n allowable_features = {\n 'possible_atomic_num_list': list(range(1, 119)) + ['misc'],\n 'possible_chirality_list': [\n 'CHI_UNSPECIFIED',\n 'CHI_TETRAHEDRAL_CW',\n 'CHI_TETRAHEDRAL_CCW',\n 'CHI_OTHER'\n ],\n 'possible_degree_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'misc'],\n 'possible_numring_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],\n 'possible_implicit_valence_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],\n 'possible_formal_charge_list': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 'misc'],\n 'possible_numH_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],\n 'possible_number_radical_e_list': [0, 1, 2, 3, 4, 'misc'],\n 'possible_hybridization_list': [\n 'SP', 'SP2', 'SP3', 'SP3D', 'SP3D2', 'misc'\n ],\n 'possible_is_aromatic_list': [False, True],\n 'possible_is_in_ring3_list': [False, True],\n 'possible_is_in_ring4_list': [False, True],\n 'possible_is_in_ring5_list': [False, True],\n 'possible_is_in_ring6_list': [False, True],\n 'possible_is_in_ring7_list': [False, True],\n 'possible_is_in_ring8_list': [False, True],\n 'possible_amino_acids': ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET',\n 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL', 'HIP', 'HIE', 'TPO', 'HID', 'LEV', 'MEU',\n 'PTR', 'GLV', 'CYT', 'SEP', 'HIZ', 'CYM', 'GLM', 'ASQ', 'TYS', 'CYX', 'GLZ', 'misc'],\n 'possible_atom_type_2': ['C*', 'CA', 'CB', 'CD', 'CE', 'CG', 'CH', 'CZ', 'N*', 'ND', 'NE', 'NH', 'NZ', 'O*', 'OD',\n 'OE', 'OG', 'OH', 'OX', 'S*', 'SD', 'SG', 'misc'],\n 'possible_atom_type_3': ['C', 'CA', 'CB', 'CD', 'CD1', 'CD2', 'CE', 'CE1', 'CE2', 'CE3', 'CG', 'CG1', 'CG2', 'CH2',\n 'CZ', 'CZ2', 'CZ3', 'N', 'ND1', 'ND2', 'NE', 'NE1', 'NE2', 'NH1', 'NH2', 'NZ', 'O', 'OD1',\n 'OD2', 'OE1', 'OE2', 'OG', 'OG1', 'OH', 'OXT', 'SD', 'SG', 'misc'],\n }\n amino_acids_type = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I',\n 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']\n\n def __init__(self, root: str, name: str, raw_dir: str,\n num_residue_type: int = 20,\n micro_radius: int = 20,\n c_alpha_max_neighbors: int = 10,\n cutoff: int = 30,\n seq_dist_cut: int = 64,\n use_micro: bool = False,\n use_angle: bool = False,\n use_omega: bool = False,\n transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None,\n pre_filter: Optional[Callable] = None,\n divide_num: int = 1,\n divide_idx: int = 0,\n replace_graph: bool = False,\n replace_process: bool = False\n ):\n self.divide_num = divide_num\n self.divide_idx = divide_idx\n self.replace_graph = replace_graph\n self.replace_process = replace_process\n\n self.root = root\n self.name = name\n self.raw_root = raw_dir\n self.num_residue_type = num_residue_type\n self.micro_radius = micro_radius\n self.c_alpha_max_neighbors = c_alpha_max_neighbors\n self.cutoff = cutoff\n self.seq_dist_cut = seq_dist_cut\n self.use_micro = use_micro\n self.use_angle = use_angle\n self.use_omega = use_omega\n \n self.protein_names = []\n self.wrong_protein_names = []\n self.total_protein_names = []\n if os.path.exists(self.total_protein_name_file):\n self.protein_names = open(self.saved_protein_name_file, 'r').read().splitlines()\n if os.path.exists(self.wrong_protein_name_file):\n self.wrong_protein_names = open(self.wrong_protein_name_file, 'r').read().splitlines()\n if os.path.exists(self.total_protein_name_file):\n self.total_protein_names = open(self.total_protein_name_file, 'r').read().splitlines()\n \n # in A. Default is 1.40 roughly the radius of a water molecule.\n # resolution of the surface of each atom. 
Default is 100. A higher number of points results in more precise measurements, but slows down the calculation.\n self.sr = ShrakeRupley(probe_radius=1.4, n_points=100) \n self.biopython_parser = PDBParser()\n\n self.saved_graph_path = self.mk_saved_graph_path()\n super().__init__(root, transform, pre_transform, pre_filter)\n # After processing protein from pdb --> Data\n \n self.length_total = len(self.protein_names)\n\n @property\n def raw_file_names(self) -> str:\n return self.raw_root\n\n @property\n def raw_dir(self) -> str:\n return self.raw_root\n\n def mk_saved_graph_path(self) -> str:\n os.makedirs(os.path.join(self.root, self.name.capitalize()), exist_ok=True)\n graph_dir = os.path.join(self.root, self.name.capitalize(), 'graph')\n os.makedirs(graph_dir, exist_ok=True)\n return graph_dir\n\n @property\n def total_protein_name_file(self) -> str:\n return os.path.join(self.root, self.name.capitalize(), 'total_proteins.txt')\n\n @property\n def saved_protein_name_file(self) -> str:\n return os.path.join(self.root, self.name.capitalize(), 'saved_proteins.txt')\n\n @property\n def wrong_protein_name_file(self) -> str:\n return os.path.join(self.root, self.name.capitalize(), 'wrong_proteins.txt')\n \n @property\n def processed_dir(self) -> str:\n return os.path.join(self.root, self.name.capitalize(), 'processed')\n\n @property\n def processed_file_names(self) -> str:\n return [p+\".pt\" for p in self.protein_names]\n\n def download(self):\n pass\n\n def process(self):\n # if self.replace_graph:\n self.generate_protein_graph_evaluation()\n\n exist_proteins = []\n proteins = open(self.saved_protein_name_file, 'r').read().splitlines()\n for p in proteins:\n file = p + '.pt'\n if os.path.exists(os.path.join(self.saved_graph_path, file)):\n exist_proteins.append(file)\n\n protein_num = len(exist_proteins)\n if (not self.replace_process) and (len(os.listdir(self.processed_dir)) >= protein_num):\n return 0\n\n process_bar = tqdm(exist_proteins)\n for protein in process_bar:\n process_bar.set_description(f\"Processing {protein}\")\n \n graph_data = torch.load(os.path.join(self.saved_graph_path, protein))\n tmpseq = [one_letter[amino] for amino in graph_data.seq]\n graph_data.seq = \"\".join(tmpseq)\n\n if self.pre_filter is not None:\n graph_data = self.pre_filter(graph_data)\n\n if self.pre_transform is not None:\n graph_data = self.pre_transform(graph_data)\n \n saved_prcessed_name = os.path.join(self.processed_dir, protein)\n torch.save(graph_data, saved_prcessed_name)\n\n def generate_protein_graph_evaluation(self):\n self.total_protein_names = sorted(os.listdir(self.raw_dir))\n process_bar = tqdm(self.total_protein_names)\n for name in process_bar:\n process_bar.set_description(f\"Processing {name}\")\n protein_dir = os.path.join(self.raw_dir, name)\n \n if os.path.exists(os.path.join(self.saved_graph_path, name + '.pt')) or not os.path.isdir(protein_dir):\n continue\n\n pdb_suffix = \".pdb\"\n pdb_file = os.path.join(protein_dir, name + pdb_suffix)\n assert os.path.exists(pdb_file), f\"{pdb_file} does not exist\"\n\n rec, rec_coords, c_alpha_coords, n_coords, c_coords,seq = self.get_receptor_inference(\n pdb_file)\n\n rec_graph = self.get_calpha_graph(rec, c_alpha_coords, n_coords, c_coords,seq)\n if not rec_graph:\n self.wrong_protein_names.append(name)\n continue\n torch.save(rec_graph, os.path.join(self.saved_graph_path, name + '.pt'))\n \n with open(self.total_protein_name_file, 'w') as fp:\n for item in self.total_protein_names:\n fp.writelines(\"%s\\n\" % item)\n print(f\"Total 
proteins: {self.total_protein_names}\")\n \n self.protein_names = sorted([name.split(\".\")[0] for name in os.listdir(self.saved_graph_path)])\n with open(self.saved_protein_name_file, 'w') as fp:\n for item in self.protein_names:\n fp.writelines(\"%s\\n\" % item)\n \n with open(self.wrong_protein_name_file, 'w') as fp:\n for item in self.wrong_protein_names:\n fp.writelines(\"%s\\n\" % item)\n print(f\"Wrong proteins: {self.wrong_protein_names}\")\n\n def rec_residue_featurizer(self, rec, one_hot=True, add_feature=None):\n num_res = len([_ for _ in rec.get_residues()])\n num_feature = 2\n if add_feature.any():\n num_feature += add_feature.shape[1]\n res_feature = torch.zeros(num_res, self.num_residue_type + num_feature)\n count = 0\n self.sr.compute(rec, level=\"R\")\n for residue in rec.get_residues():\n sasa = residue.sasa\n for atom in residue:\n if atom.name == 'CA':\n bfactor = atom.bfactor\n assert not np.isinf(bfactor)\n assert not np.isnan(bfactor)\n assert not np.isinf(sasa)\n assert not np.isnan(sasa)\n\n residx = safe_index(\n self.allowable_features['possible_amino_acids'], residue.get_resname())\n res_feat_1 = one_hot_res(\n residx, num_residue_type=self.num_residue_type) if one_hot else [residx]\n if not res_feat_1:\n return False\n res_feat_1.append(sasa)\n res_feat_1.append(bfactor)\n if num_feature > 2:\n res_feat_1.extend(list(add_feature[count, :]))\n res_feature[count, :] = torch.tensor(\n res_feat_1, dtype=torch.float32)\n count += 1\n\n for k in range(self.num_residue_type, self.num_residue_type + 2):\n mean = res_feature[:, k].mean()\n std = res_feature[:, k].std()\n res_feature[:, k] = (res_feature[:, k] - mean) / (std + 0.000000001)\n return res_feature\n\n def get_node_features(self, n_coords, c_coords, c_alpha_coords, coord_mask, with_coord_mask=True, use_angle=False, use_omega=False):\n num_res = n_coords.shape[0]\n if use_omega:\n num_angle_type = 3\n angles = np.zeros((num_res, num_angle_type))\n for i in range(num_res-1):\n # These angles are called φ (phi) which involves the backbone atoms C-N-Cα-C\n angles[i, 0] = dihedral(c_coords[i], n_coords[i], c_alpha_coords[i], n_coords[i+1])\n # psi involves the backbone atoms N-Cα-C-N.\n angles[i, 1] = dihedral(n_coords[i], c_alpha_coords[i], c_coords[i], n_coords[i+1])\n angles[i, 2] = dihedral(c_alpha_coords[i], c_coords[i], n_coords[i+1], c_alpha_coords[i+1])\n else:\n num_angle_type = 2\n angles = np.zeros((num_res, num_angle_type))\n for i in range(num_res-1):\n # These angles are called φ (phi) which involves the backbone atoms C-N-Cα-C\n angles[i, 0] = dihedral(c_coords[i], n_coords[i], c_alpha_coords[i], n_coords[i+1])\n # psi involves the backbone atoms N-Cα-C-N.\n angles[i, 1] = dihedral(n_coords[i], c_alpha_coords[i], c_coords[i], n_coords[i+1])\n if use_angle:\n node_scalar_features = angles\n else:\n node_scalar_features = np.zeros((num_res, num_angle_type*2))\n for i in range(num_angle_type):\n node_scalar_features[:, 2*i] = np.sin(angles[:, i])\n node_scalar_features[:, 2*i + 1] = np.cos(angles[:, i])\n\n if with_coord_mask:\n node_scalar_features = torch.cat([\n node_scalar_features,\n coord_mask.float().unsqueeze(-1)\n ], dim=-1)\n node_vector_features = None\n return node_scalar_features, node_vector_features\n\n def get_calpha_graph(self, rec, c_alpha_coords, n_coords, c_coords,seq):\n scalar_feature, vec_feature = self.get_node_features(\n n_coords, c_coords, c_alpha_coords, coord_mask=None, \n with_coord_mask=False, use_angle=self.use_angle, use_omega=self.use_omega\n )\n # Extract 3D 
coordinates and n_i,u_i,v_i\n # vectors of representative residues ################\n residue_representatives_loc_list = []\n n_i_list = []\n u_i_list = []\n v_i_list = []\n for i, residue in enumerate(rec.get_residues()):\n n_coord = n_coords[i]\n c_alpha_coord = c_alpha_coords[i]\n c_coord = c_coords[i]\n u_i = (n_coord - c_alpha_coord) / np.linalg.norm(n_coord - c_alpha_coord)\n t_i = (c_coord - c_alpha_coord) / np.linalg.norm(c_coord - c_alpha_coord)\n n_i = np.cross(u_i, t_i) / np.linalg.norm(np.cross(u_i, t_i)) # main chain\n v_i = np.cross(n_i, u_i)\n assert (math.fabs(np.linalg.norm(v_i) - 1.) < 1e-5), \"protein utils protein_to_graph_dips, v_i norm larger than 1\"\n n_i_list.append(n_i)\n u_i_list.append(u_i)\n v_i_list.append(v_i)\n residue_representatives_loc_list.append(c_alpha_coord)\n \n # (N_res, 3)\n residue_representatives_loc_feat = np.stack(residue_representatives_loc_list, axis=0)\n \n n_i_feat = np.stack(n_i_list, axis=0)\n u_i_feat = np.stack(u_i_list, axis=0)\n v_i_feat = np.stack(v_i_list, axis=0)\n num_residues = len(c_alpha_coords)\n if num_residues <= 1:\n raise ValueError(f\"rec contains only 1 residue!\")\n\n ################### Build the k-NN graph ##############################\n assert num_residues == residue_representatives_loc_feat.shape[0]\n assert residue_representatives_loc_feat.shape[1] == 3\n distances = spa.distance.cdist(c_alpha_coords, c_alpha_coords)\n\n src_list = []\n dst_list = []\n dist_list = []\n mean_norm_list = []\n for i in range(num_residues):\n dst = list(np.where(distances[i, :] < self.cutoff)[0])\n dst.remove(i)\n if self.c_alpha_max_neighbors != None and len(dst) > self.c_alpha_max_neighbors:\n dst = list(np.argsort(distances[i, :]))[1: self.c_alpha_max_neighbors + 1]\n if len(dst) == 0:\n # choose second because first is i itself\n dst = list(np.argsort(distances[i, :]))[1:2]\n log(f'The c_alpha_cutoff {self.cutoff} was too small for one c_alpha such that it had no neighbors. 
So we connected it to the closest other c_alpha')\n assert i not in dst\n \n src = [i] * len(dst)\n src_list.extend(src)\n dst_list.extend(dst)\n valid_dist = list(distances[i, dst])\n dist_list.extend(valid_dist)\n valid_dist_np = distances[i, dst]\n \n sigma = np.array([1., 2., 5., 10., 30.]).reshape((-1, 1))\n # (sigma_num, neigh_num)\n weights = softmax(-valid_dist_np.reshape((1, -1)) ** 2 / sigma, axis=1)\n # print(weights)\n assert weights[0].sum() > 1 - 1e-2 and weights[0].sum() < 1.01\n # (neigh_num, 3)\n diff_vecs = residue_representatives_loc_feat[src, :] - residue_representatives_loc_feat[dst, :]\n # (sigma_num, 3)\n mean_vec = weights.dot(diff_vecs)\n # (sigma_num,)\n denominator = weights.dot(np.linalg.norm(diff_vecs, axis=1))\n # (sigma_num,)\n mean_vec_ratio_norm = np.linalg.norm(mean_vec, axis=1) / denominator\n mean_norm_list.append(mean_vec_ratio_norm)\n \n assert len(src_list) == len(dst_list)\n assert len(dist_list) == len(dst_list)\n \n residue_representatives_loc_feat = torch.from_numpy(residue_representatives_loc_feat.astype(np.float32))\n x = self.rec_residue_featurizer(rec, one_hot=True, add_feature=scalar_feature)\n \n if isinstance(x, bool) and (not x):\n return False\n\n graph = Data(\n x=x,\n pos=residue_representatives_loc_feat,\n edge_attr=self.get_edge_features(src_list, dst_list, dist_list, divisor=4),\n edge_index=torch.tensor([src_list, dst_list]),\n edge_dist=torch.tensor(dist_list),\n distances=torch.tensor(distances),\n mu_r_norm=torch.from_numpy(np.array(mean_norm_list).astype(np.float32)),\n seq=seq\n )\n\n # Loop over all edges of the graph and build the various p_ij, q_ij, k_ij, t_ij pairs\n edge_feat_ori_list = []\n for i in range(len(dist_list)):\n src = src_list[i]\n dst = dst_list[i]\n # place n_i, u_i, v_i as lines in a 3x3 basis matrix\n basis_matrix = np.stack((n_i_feat[dst, :], u_i_feat[dst, :], v_i_feat[dst, :]), axis=0)\n p_ij = np.matmul(\n basis_matrix,\n residue_representatives_loc_feat[src, :] - residue_representatives_loc_feat[dst, :]\n )\n q_ij = np.matmul(basis_matrix, n_i_feat[src, :]) # shape (3,)\n k_ij = np.matmul(basis_matrix, u_i_feat[src, :])\n t_ij = np.matmul(basis_matrix, v_i_feat[src, :])\n s_ij = np.concatenate((p_ij, q_ij, k_ij, t_ij), axis=0) # shape (12,)\n edge_feat_ori_list.append(s_ij)\n\n edge_feat_ori_feat = np.stack(edge_feat_ori_list, axis=0) # shape (num_edges, 4, 3)\n edge_feat_ori_feat = torch.from_numpy(edge_feat_ori_feat.astype(np.float32))\n\n graph.edge_attr = torch.cat([graph.edge_attr, edge_feat_ori_feat], axis=1) # (num_edges, 17)\n #graph = self.remove_node(graph, graph.x.shape[0]-1)\n # self.get_calpha_graph_single(graph, 6)\n return graph\n\n def remove_node(self, graph, node_idx):\n new_graph = Data.clone(graph)\n # delete node\n new_graph.x = torch.cat(\n [new_graph.x[:node_idx, :], new_graph.x[node_idx+1:, :]])\n new_graph.pos = torch.cat(\n [new_graph.pos[:node_idx, :], new_graph.pos[node_idx+1:, :]])\n new_graph.mu_r_norm = torch.cat(\n [new_graph.mu_r_norm[:node_idx, :], new_graph.mu_r_norm[node_idx+1:, :]])\n\n # delete edge\n keep_edge = (torch.sum(new_graph.edge_index == node_idx, dim=0) == 0)\n new_graph.edge_index = new_graph.edge_index[:, keep_edge]\n new_graph.edge_attr = new_graph.edge_attr[keep_edge, :]\n return new_graph\n\n def get_edge_features(self, src_list, dst_list, dist_list, divisor=4):\n seq_edge = torch.absolute(torch.tensor(src_list) - torch.tensor(dst_list)).reshape(-1, 1)\n seq_edge = torch.where(seq_edge > self.seq_dist_cut, self.seq_dist_cut, seq_edge)\n seq_edge = 
F.one_hot(seq_edge, num_classes=self.seq_dist_cut + 1).reshape((-1, self.seq_dist_cut + 1))\n \n contact_sig = torch.where(torch.tensor(dist_list) <= 8, 1, 0).reshape(-1, 1)\n # avg distance = 7. So divisor = (4/7)*7 = 4\n dist_fea = self.distance_featurizer(dist_list, divisor=divisor)\n \n return torch.concat([seq_edge, dist_fea, contact_sig], dim=-1)\n\n def get_receptor_inference(self, rec_path):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=PDBConstructionWarning)\n structure = self.biopython_parser.get_structure('random_id', rec_path)\n rec = structure[0]\n coords = []\n c_alpha_coords = []\n n_coords = []\n c_coords = []\n valid_chain_ids = []\n lengths = []\n seq = []\n for i, chain in enumerate(rec):\n chain_coords = [] # num_residues, num_atoms, 3\n chain_c_alpha_coords = []\n chain_n_coords = []\n chain_c_coords = []\n count = 0\n invalid_res_ids = []\n for res_idx, residue in enumerate(chain):\n if residue.get_resname() == 'HOH':\n invalid_res_ids.append(residue.get_id())\n continue\n residue_coords = []\n c_alpha, n, c = None, None, None\n for atom in residue:\n if atom.name == 'CA':\n c_alpha = list(atom.get_vector())\n seq.append(str(residue).split(\" \")[1])\n if atom.name == 'N':\n n = list(atom.get_vector())\n if atom.name == 'C':\n c = list(atom.get_vector())\n residue_coords.append(list(atom.get_vector()))\n # only append residue if it is an amino acid and not some weired molecule that is part of the complex\n if c_alpha != None and n != None and c != None:\n chain_c_alpha_coords.append(c_alpha)\n chain_n_coords.append(n)\n chain_c_coords.append(c)\n chain_coords.append(np.array(residue_coords))\n count += 1\n else:\n invalid_res_ids.append(residue.get_id())\n for res_id in invalid_res_ids:\n chain.detach_child(res_id)\n lengths.append(count)\n coords.append(chain_coords)\n c_alpha_coords.append(np.array(chain_c_alpha_coords))\n n_coords.append(np.array(chain_n_coords))\n c_coords.append(np.array(chain_c_coords))\n if len(chain_coords) > 0:\n valid_chain_ids.append(chain.get_id())\n valid_coords = []\n valid_c_alpha_coords = []\n valid_n_coords = []\n valid_c_coords = []\n valid_lengths = []\n invalid_chain_ids = []\n for i, chain in enumerate(rec):\n if chain.get_id() in valid_chain_ids:\n valid_coords.append(coords[i])\n valid_c_alpha_coords.append(c_alpha_coords[i])\n valid_n_coords.append(n_coords[i])\n valid_c_coords.append(c_coords[i])\n valid_lengths.append(lengths[i])\n else:\n invalid_chain_ids.append(chain.get_id())\n # list with n_residues arrays: [n_atoms, 3]\n coords = [item for sublist in valid_coords for item in sublist]\n\n c_alpha_coords = np.concatenate(valid_c_alpha_coords, axis=0) # [n_residues, 3]\n n_coords = np.concatenate(valid_n_coords, axis=0) # [n_residues, 3]\n c_coords = np.concatenate(valid_c_coords, axis=0) # [n_residues, 3]\n\n for invalid_id in invalid_chain_ids:\n rec.detach_child(invalid_id)\n\n assert len(c_alpha_coords) == len(n_coords)\n assert len(c_alpha_coords) == len(c_coords)\n assert sum(valid_lengths) == len(c_alpha_coords)\n return rec, coords, c_alpha_coords, n_coords, c_coords,seq\n\n def len(self):\n return len(os.listdir(self.saved_graph_path))\n\n def get_statistic_info(self):\n node_num = torch.zeros(self.length_total)\n edge_num = torch.zeros(self.length_total)\n for i in tqdm(range(self.length_total)):\n graph = self.get(i)\n node_num[i] = graph.x.shape[0]\n edge_num[i] = graph.edge_index.shape[1]\n # if i == 1000:\n # break\n num_node_min = torch.min(node_num)\n num_node_max = 
torch.max(node_num)\n num_node_avg = torch.mean(node_num)\n num_edge_min = torch.min(edge_num)\n num_edge_max = torch.max(edge_num)\n num_edge_avg = torch.mean(edge_num)\n print(f'Graph Num: {self.length_total}')\n print(\n f'Min Nodes: {num_node_min:.2f} Max Nodes: {num_node_max:.2f}. Avg Nodes: {num_node_avg:.2f}')\n print(\n f'Min Edges: {num_edge_min:.2f} Max Edges: {num_edge_max:.2f}. Avg Edges: {num_edge_avg:.2f}')\n\n def get(self, idx):\n protein_name = self.protein_names[idx]\n data = torch.load(os.path.join(self.processed_dir, f'{protein_name}.pt'))\n notes_number = list((data.x[:, :20].argmax(dim=1)).size())[0]\n data.y = torch.argmax(data.x[torch.tensor(range(notes_number)), :self.num_residue_type], dim=1)\n data.protein_name = protein_name\n return data\n\n def find_idx(self, idx_protein, amino_idx):\n idx = (self.distances[idx_protein][:-1, amino_idx]\n < self.micro_radius).nonzero(as_tuple=True)[0]\n return idx\n\n def get_calpha_graph_single(self, graph, idx_protein, amino_idx):\n choosen_amino_idx = self.find_idx(idx_protein, amino_idx)\n keep_edge_index = []\n \n for edge_idx in range(graph.num_edges):\n edge = graph.edge_index.t()[edge_idx]\n if (edge[0] in choosen_amino_idx) and (edge[1] in choosen_amino_idx):\n keep_edge_index.append(edge_idx)\n \n graph1 = Data(\n x=graph.x[choosen_amino_idx, :],\n pos=graph.pos[choosen_amino_idx, :],\n edge_index=graph.edge_index[:, keep_edge_index],\n edge_attr=graph.edge_attr[keep_edge_index, :],\n mu_r_norm=graph.mu_r_norm[choosen_amino_idx, :]\n )\n return graph1\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}{self.name.capitalize()}()'\n\n def distance_featurizer(self, dist_list, divisor) -> torch.Tensor:\n # you want to use a divisor that is close to 4/7 times the average distance that you want to encode\n length_scale_list = [1.5 ** x for x in range(15)]\n center_list = [0. for _ in range(15)]\n\n num_edge = len(dist_list)\n dist_list = np.array(dist_list)\n\n transformed_dist = [np.exp(- ((dist_list / divisor) ** 2) / float(length_scale))\n for length_scale, center in zip(length_scale_list, center_list)]\n\n transformed_dist = np.array(transformed_dist).T\n transformed_dist = transformed_dist.reshape((num_edge, -1))\n return torch.from_numpy(transformed_dist.astype(np.float32))" }, { "identifier": "NormalizeProtein", "path": "src/utils/dataset_utils.py", "snippet": "class NormalizeProtein(BaseTransform):\n r\"\"\"Centers and normalizes node positions to the interval :math:`(-1, 1)`\n (functional name: :obj:`normalize_scale`).\n \"\"\"\n\n def __init__(self, filename, skip_x=20, skip_edge_attr=64, safe_domi=1e-10):\n\n dic = torch.load(filename)\n self.skip_x = skip_x\n self.skip_edge_attr = skip_edge_attr\n self.safe_domi = safe_domi\n self.x_mean = dic['x_mean']\n self.x_std = dic['x_std']\n self.pos_mean = dic['pos_mean']\n self.pos_std = torch.mean(dic['pos_std'])\n self.edge_attr_mean = dic['edge_attr_mean']\n self.edge_attr_std = dic['edge_attr_std']\n\n def __call__(self, data):\n data.x[:, self.skip_x:] = (data.x[:, self.skip_x:] - self.x_mean[self.skip_x:]\n ).div_(self.x_std[self.skip_x:] + self.safe_domi)\n data.pos = data.pos - data.pos.mean(dim=-2, keepdim=False)\n data.pos = data.pos.div_(self.pos_std + self.safe_domi)\n data.edge_attr[:, self.skip_edge_attr:] = (data.edge_attr[:, self.skip_edge_attr:]\n - self.edge_attr_mean[self.skip_edge_attr:]).div_(self.edge_attr_std[self.skip_edge_attr:] + self.safe_domi)\n\n return data" } ]
import os, sys
import argparse
from src.dataset.cath_dataset import CathDataset
from src.dataset.mutant_dataset import MutantDataset
from src.utils.dataset_utils import NormalizeProtein
17,302
# set path
current_dir = os.getcwd()
sys.path.append(current_dir)

def build_cath_dataset(args, split):
    dataset = CathDataset(
        root=args.cath_dataset,
        split=split,
        divide_num=1,
        divide_idx=0,
        c_alpha_max_neighbors=args.c_alpha_max_neighbors,
        set_length=None,
        p=args.noise_ratio,
        normalize_file=f'norm/cath_k{args.c_alpha_max_neighbors}_mean_attr.pt',
    )
    return dataset

def build_mutant_dataset(args):
    mm_dataset = MutantDataset(
        root=args.mutant_dataset_dir,
        name=args.mutant_name,
        raw_dir=args.mutant_dataset_dir+"/DATASET",
        c_alpha_max_neighbors=args.c_alpha_max_neighbors,
# set path
current_dir = os.getcwd()
sys.path.append(current_dir)

def build_cath_dataset(args, split):
    dataset = CathDataset(
        root=args.cath_dataset,
        split=split,
        divide_num=1,
        divide_idx=0,
        c_alpha_max_neighbors=args.c_alpha_max_neighbors,
        set_length=None,
        p=args.noise_ratio,
        normalize_file=f'norm/cath_k{args.c_alpha_max_neighbors}_mean_attr.pt',
    )
    return dataset

def build_mutant_dataset(args):
    mm_dataset = MutantDataset(
        root=args.mutant_dataset_dir,
        name=args.mutant_name,
        raw_dir=args.mutant_dataset_dir+"/DATASET",
        c_alpha_max_neighbors=args.c_alpha_max_neighbors,
pre_transform=NormalizeProtein(
2
2023-11-10 07:21:37+00:00
24k
atlantic-quantum/Shipyard
tests/printers/visualizer/test_visualize_pulse_sequences.py
[ { "identifier": "CoreType", "path": "shipyard/awg_core/awg_core.py", "snippet": "class CoreType(Enum):\n \"\"\"Enumeration of AWG Core types\"\"\"\n\n HD = \"HD\"\n QA = \"QA\"\n SG = \"SG\"" }, { "identifier": "ActivationRecord", "path": "shipyard/call_stack.py", "snippet": "class ActivationRecord:\n \"\"\"Activation Records for shipyard\"\"\"\n\n def __init__(\n self,\n name: str,\n ar_type: ARType,\n nesting_level: int,\n ):\n self.name = name\n self.type = ar_type\n self.nesting_level = nesting_level\n self.members = {}\n\n def __setitem__(self, key, value):\n self.members[key] = value\n LOGGER.debug(\"%s: %s\", key, value)\n\n def __getitem__(self, key):\n return self.members[key]\n\n def get(self, key, default=None):\n \"\"\"Gets a member of the activation record by key\"\"\"\n return self.members.get(key, default)\n\n def __str__(self):\n lines = [f\"{self.nesting_level}: {self.type.value} {self.name}\"]\n for name, val in self.members.items():\n lines.append(f\" {name:<20}: {val}\")\n\n return \"\\n\".join(lines)\n\n def __repr__(self):\n return self.__str__()" }, { "identifier": "ARType", "path": "shipyard/call_stack.py", "snippet": "class ARType(Enum):\n \"\"\"\n Enumeration of Acivation Record Types\n \"\"\"\n\n PROGRAM = \"PROGRAM\"\n EXTERN = \"EXTERN\"\n SUBROUTINE = \"SUBROUTINE\"\n CALIBRATION = \"CALIBRATION\"\n DEFCAL = \"DEFCAL\"\n GATE = \"GATE\"\n LOOP = \"LOOP\"" }, { "identifier": "Compiler", "path": "shipyard/compiler.py", "snippet": "class Compiler:\n version = \"0.1.1\"\n \"\"\"\n Compiler to compile openQASM programs to target programs for different AWG Cores.\n Currently supports compilation to ZI SEQC cores.\n\n Args:\n program_path (Path):\n Path object pointing to a qasm program file.\n setup (Setup | Path):\n Path object pointing to a experiment setup json file.\n frames_from_setup (bool, optional):\n If True, frame definitions and port declarations are generated from setup.\n If False, frame definitions and port declarations should be written\n explicitly in the qasm program.\n Defaults to False to preserve original behavior.\n \"\"\"\n\n def __init__(\n self,\n program_path: Path,\n setup: Setup | Path,\n frames_from_setup: bool = False,\n ) -> None:\n self.program_path = program_path\n self.program = CopyTransformer().visit_Program(self.load_program(program_path))\n setup = setup if isinstance(setup, Setup) else Setup.from_file(setup)\n if frames_from_setup:\n self._frames_from_setup(setup)\n self.setup = setup.to_internal()\n self.split_programs: dict[tuple[str, int, str], ast.Program] = {}\n self.split_compiled: dict[tuple[str, int, str], str] = {}\n self.core_settings: dict[tuple[str, int, str], list[tuple[str], Any]] = {}\n self.wfm_mapping: dict[tuple[str, int, str], dict[int, str]] = {}\n\n @staticmethod\n @lru_cache()\n def load_program(path: Path) -> ast.Program:\n \"\"\"\n Loads a qasm program as an AST from a file\n\n Args:\n path (Path): path to the qasm program file\n\n Returns:\n ast.Program: qasm program as an AST\n \"\"\"\n with open(path, encoding=\"utf_8\") as qasm_file:\n qasm_code = qasm_file.read()\n return parse(qasm_code)\n\n def compile(\n self,\n inputs: dict = None,\n printer_kwargs: dict = None,\n waveforms: dict[str, ndarray] | None = None,\n command_tables: dict[tuple[str, int, str], CommandTable] | None = None,\n ):\n \"\"\"\n Compile a single openQASM program into multiple programs for each\n AWG core in the setup\n\n Args:\n inputs (dict, optional):\n Dictionary of input values for the program. 
Defaults to None.\n Used to resolve input declarations in the program.\n printer_kwargs (dict, optional):\n Dictionary of keyword arguments to pass to the printer.\n See the printer documentation for more details.\n \"\"\"\n ResolveIODeclaration(inputs).visit(self.program)\n IncludeAnalyzer(self.program_path).visit(self.program)\n IncludeWaveforms(waveforms).visit(self.program)\n SemanticAnalyzer().visit(self.program)\n DurationTransformer().visit(self.program)\n TimingConstraints(self.setup, external_zi_function_dict()).visit(self.program)\n max_delay_obj = DetermineMaxDelay(\n self.program, self.setup, external_zi_function_dict()\n )\n extractor_obj = ShotsExtractor()\n extractor_obj.visit(self.program)\n signature = extractor_obj.create_signature()\n printer_kwargs = printer_kwargs or {}\n for instr, core_index, core_type in self.setup.cores():\n if command_tables:\n command_table = command_tables.get((instr, core_index, core_type))\n else:\n command_table = None\n ports = ports_for_core(self.setup, instr, core_index)\n split_program = CoreSplitter(ports).visit_Program(self.program)\n LOGGER.debug(\n \"Split Program before removing unused, core: (%s, %i, %s):\",\n instr,\n core_index,\n core_type,\n )\n LOGGER.debug(\"\\n%s\", LazyRepr(qasm_dumps, [split_program]))\n for repetition in [\"1st pass\", \"2nd pass\"]:\n RemoveUnused(split_program)\n LOGGER.debug(\n \"Split Program after removing unused (%s), core: (%s, %i, %s):\",\n repetition,\n instr,\n core_index,\n core_type,\n )\n LOGGER.debug(\"\\n%s\", LazyRepr(qasm_dumps, [split_program]))\n self.split_programs[(instr, core_index, core_type)] = split_program\n # todo dynamically choose printer based on instrument type\n InsertCTWaveforms(command_table).visit(split_program)\n printer = SEQCPrinter(\n io.StringIO(),\n self.setup,\n signature,\n max_delay_obj.result(),\n **printer_kwargs\n )\n printer.visit(split_program)\n compiled = printer.stream.getvalue()\n LOGGER.debug(\n \"Compiled Program, core: core: (%s, %i, %s):\",\n instr,\n core_index,\n core_type,\n )\n LOGGER.debug(\"\\n%s\", compiled)\n self.split_compiled[(instr, core_index, core_type)] = compiled\n self.core_settings[(instr, core_index, core_type)] = printer.settings()\n self.wfm_mapping[(instr, core_index, core_type)] = printer.wfm_mapping()\n\n @lru_cache()\n @staticmethod\n def cached_compile(\n program_path: Path,\n setup: Setup | Path,\n inputs: dict | None = None,\n printer_kwargs: dict | None = None,\n frames_from_setup: bool = False,\n ) -> \"Compiler\":\n \"\"\"Method to compile a program and cache the result.\n\n Args:\n program_path (Path):\n path to the qasm program file\n setup (Setup | Path):\n path to the laboratory setup file\n inputs (dict | None, optional):\n dictionary of input values for the program,\n used to resolve input declarations. Defaults to None.\n printer_kwargs (dict | None, optional):\n Dictionary of kwarg arguments to pass to the printer,\n see printer documentation for details. 
Defaults to None.\n frames_from_setup (bool, optional):\n If True, frame definitions and port declarations are generated from\n setup.\n If False, frame definitions and port declarations should be written\n explicitly in the qasm program.\n Defaults to False to preserve original behavior.\n\n Returns:\n Compiler: cached compiler object\n \"\"\"\n compiler = Compiler(program_path, setup, frames_from_setup)\n compiler.compile(inputs, printer_kwargs)\n return compiler\n\n def _frames_from_setup(self, setup: Setup) -> None:\n \"\"\"\n inserts a calibrationStatement after the defcalgrammar statement, the\n calibrationStatement created from the setup file\n\n Args:\n setup_path (Path): path to the setup file\n\n Raises:\n ValueError: if no calibration grammar is defined in the program\n ValueError: if the calibration grammar is not openpulse\n \"\"\"\n # make sure defcalgrammar has been define before inserting setup\n for i, statement in enumerate(self.program.statements):\n if isinstance(statement, ast.CalibrationGrammarDeclaration):\n break\n else:\n raise ValueError(\n \"No calibration grammar defined in program, cannot insert setup.\"\n )\n # make sure defcalgrammar is openpulse\n if statement.name != \"openpulse\":\n raise ValueError(\"calibration grammar be 'openpulse', \")\n # insert cal from setup after defcalgrammar statement\n self.program.statements.insert(i + 1, setup.get_qasm())" }, { "identifier": "Duration", "path": "shipyard/duration.py", "snippet": "class Duration(BaseModel):\n \"\"\"\n pydantic model for managing times/durations in openQASM programs\n\n Durations have both time and unit (ns, us, ms, s) (and dt which represents sample\n time at 2GS/s)\n\n Durations can be added to other Durations or numbers (int, float), they can also\n be compared to one another or to numbers (int, float)\n\n the native max/min python operations work with lists of Durations.\n\n The unit of a Duration can be changed using the 'set_unit' method.\n \"\"\"\n\n # todo consider rounding to nearest ps/fs to avoid floating point errors.\n time: float\n unit: TimeUnits = TimeUnits.dt\n\n def set_unit(self, unit: TimeUnits):\n \"\"\"\n Changes the unit of the Duration and updates the time to be represented in the\n new unit.\n\n Example:\n dur = Duration(time=100, unit=TimeUnits.ns)\n dur.set_unit(TimeUnits.us)\n\n # dur -> Duration(time=0.1, unit=TimeUnits.us)\n \"\"\"\n self.time = self.time * self.unit.value / unit.value\n self.unit = unit\n\n def _real_time(self) -> float:\n \"\"\"Calculates the time in seconds\n\n Returns:\n float: time in seconds\n \"\"\"\n return self.time * self.unit.value\n\n def __add__(self, other): # (self, other: Self) -> Self\n \"\"\"\n Adds Durations together or a number to a Duration\n\n Example (two Durations):\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n dur3 = dur1 + dur2 # dur3 -> Duration(time=101, unit=TimeUnits.ns)\n dur4 = dur2 + dur1 # dur3 -> Duration(time=0.101, unit=TimeUnits.us)\n\n Example (Duration and int or float):\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = dur1 + 10e-9 # dur2 -> Duration(time=11, unit.TimeUnits.ns)\n\n Args:\n other (Duration | int | float): the Duration or number to add to this\n duration\n\n Raises:\n ValueError: if 'other' is not a Durration, int or float\n\n Returns:\n Duration: sum of this Duration and other\n \"\"\"\n if isinstance(other, Duration):\n return Duration(\n time=self.time + other.time * other.unit.value / self.unit.value,\n unit=self.unit,\n )\n if 
isinstance(other, (int, float)):\n return Duration(time=self.time + other / self.unit.value, unit=self.unit)\n raise ValueError(f\"'+' not supported between {type(self)} and {type(other)}\")\n\n def __radd__(self, other):\n \"\"\"\n right addition, allows Durations to be added to numbers\n addition of Durations is complimentary\n\n Args:\n other (int | float): number Duration is being added to\n\n Returns:\n Duration: sum of this Duration and other\n \"\"\"\n return self.__add__(other)\n\n def __str__(self) -> str:\n \"\"\"\n Formats how Durations are printed\n Example:\n dur = Duration(time=16, unit=TimeUnits.ns)\n print(dur) -> '16 ns'\n\n Returns:\n str: formated string representation of Duration\n \"\"\"\n return f\"{self.time} {self.unit.name}\"\n\n def __lt__(self, other) -> bool: # (self, other: Self) -> bool:\n \"\"\"\n Compares if this Duration is lower than another Duration, int or Float\n\n Example:\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n\n dur1 < dur2 -> True\n dur < 2 -> False\n dur < 0.1 -> False\n\n Args:\n other (Duration | int | float): to compare to\n\n Raises:\n ValueError: if other is not a Duration, int or float\n\n Returns:\n bool:\n True if _real_time() value of this duration is lower than other,\n else False.\n \"\"\"\n if isinstance(other, Duration):\n return self._real_time() < other._real_time()\n if isinstance(other, (int, float)):\n return self._real_time() < other\n raise ValueError(f\"'<' not supported between {type(self)} and {type(other)}\")\n\n def __gt__(self, other) -> bool: # (self, other: Self) -> bool:\n \"\"\"\n Compares if this Duration is greater than another Duration, int or Float\n\n Example:\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n\n dur1 > dur2 -> False\n dur > 2 -> False\n dur > 0.1e-9 -> True\n\n Args:\n other (Duration | int | float): to compare to\n\n Raises:\n ValueError: if other is not a Duration, int or float\n\n Returns:\n bool:\n True if _real_time() value of this duration is greater than other,\n else False.\n \"\"\"\n if isinstance(other, Duration):\n return self._real_time() > other._real_time()\n if isinstance(other, (int, float)):\n return self._real_time() > other\n raise ValueError(f\"'>' not supported between {type(self)} and {type(other)}\")\n\n def __eq__(self, other) -> bool: # (self, other: Self) -> bool:\n \"\"\"\n Compares if this Duration is equal to another Duration, int or Float\n\n Example:\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n\n dur1 == dur2 -> False\n dur1 == dur1 -> True\n dur == 1e-9 -> True\n\n Args:\n other (Duration | int | float): to compare to\n\n Raises:\n ValueError: if other is not a Duration, int or float\n\n Returns:\n bool:\n True if _real_time() value of this duration is equal to other,\n else False.\n \"\"\"\n if isinstance(other, Duration):\n return self._real_time() == other._real_time()\n if isinstance(other, (int, float)):\n return self._real_time() == other\n raise ValueError(f\"'==' not supported between {type(self)} and {type(other)}\")\n\n def __ne__(self, other) -> bool: # (self, other: Self) -> bool:\n \"\"\"\n Compares if this Duration is not equal to another Duration, int or Float\n\n Example:\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n\n dur1 != dur2 -> True\n dur1 != dur1 -> False\n dur != 1e-9 -> False\n\n Args:\n other (Duration | int | float): to compare to\n\n Raises:\n 
ValueError: if other is not a Duration, int or float\n\n Returns:\n bool:\n True if _real_time() value of this duration is equal to other,\n else False.\n \"\"\"\n if isinstance(other, Duration):\n return self._real_time() != other._real_time()\n if isinstance(other, (int, float)):\n return self._real_time() != other\n raise ValueError(f\"'!=' not supported between {type(self)} and {type(other)}\")" }, { "identifier": "TimeUnits", "path": "shipyard/duration.py", "snippet": "class TimeUnits(Enum):\n \"\"\"\n Enumerations of common time units\n ns, µs, us, ms, s\n\n and\n\n dt = 0.5e-9 <- timestep @ 2GS/s\n \"\"\"\n\n dt = 0.5e-9\n ns = 1e-9\n µs = 1e-6\n us = 1e-6\n ms = 1e-3\n s = 1" }, { "identifier": "DurationTransformer", "path": "shipyard/passes/duration_transformer.py", "snippet": "class DurationTransformer(GenericTransformer):\n \"\"\"\n QASM Transformer that transforms DurationLiterals to have units of samples (dt).\n\n Args:\n sample_rate (int):\n the sample rate that DurationLiterals will be transformed to.\n Default value = 2e9\n \"\"\"\n\n def __init__(self, sample_rate: int = 2e9) -> None:\n self.sample_rate = sample_rate\n super().__init__()\n\n # pylint: disable=C0103\n # snake_case naming style\n\n def visit_DurationLiteral(self, node: ast.DurationLiteral) -> ast.DurationLiteral:\n \"\"\"\n DurationLiteral node Transformer. Transforms DurationLiteral nodes from any\n unit to a node with sample units (dt).\n\n Example:\n in: node = ast.DurationLiteral(value=20, unit=ast.TimeUnit.ns)\n\n usage: DurationTransformer().visit(node)\n\n out: ast.DurationLiteral(value=40, unit=ast.TimeUnit.dt)\n\n\n Args:\n node (ast.DurationLiteral):\n DurationLiteral node to transform.\n\n Returns:\n ast.DurationLiteral:\n Tranformed DurationLiteral node with unit set to samples (dt)\n \"\"\"\n if node.unit.name != \"dt\":\n new_node = ast.DurationLiteral(\n value=int(\n round(\n node.value\n * TimeUnitToValue[node.unit.name].value\n * self.sample_rate\n )\n ),\n unit=ast.TimeUnit.dt,\n )\n return new_node\n return node\n\n # pylint: enable=C0103" }, { "identifier": "ResolveIODeclaration", "path": "shipyard/passes/resolve_io_declaration.py", "snippet": "class ResolveIODeclaration(GenericTransformer):\n def __init__(self, inputs: dict = None):\n self.inputs = inputs or {} # e.g. inputs = {\"basis\": 1}\n\n def visit_IODeclaration(self, node: ast.IODeclaration) -> ast.ConstantDeclaration:\n \"\"\"\n IODeclaration node Transformer. Transforms IODeclaration nodes to\n ConstantDeclarations. 
Searches through ResolveIODeclaration.inputs\n for info to populate the ConstantDeclaration.\n\n Args:\n node (ast.IODeclaration):\n IODeclaration node to transform.\n\n Returns:\n ast.ConstantDeclaration:\n Tranformed ConstantDeclaration node with relevant data (identifier and\n init_expression)\n \"\"\"\n if node.io_identifier == ast.IOKeyword.input:\n if node.identifier.name not in self.inputs:\n raise SetupError(\n ErrorCode.ID_NOT_FOUND,\n message=f\"Input: {node.identifier.name} not found in input\"\n \" dictionary\",\n )\n match node.type:\n case ast.IntType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.IntegerLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n case ast.DurationType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.DurationLiteral(\n value=(self.inputs[node.identifier.name] * 1e9),\n unit=ast.TimeUnit.ns,\n ),\n )\n # todo: AQC-311 add support for complex input type\n # case ast.ComplexType():\n # return ast.ConstantDeclaration(\n # type=node.type,\n # identifier=node.identifier,\n # init_expression=ast.BinaryExpression(\n # op= ast.BinaryOperator['+'],\n # lhs=ast.FloatLiteral(\n # value= self.inputs[node.identifier.name].real),\n # rhs=ast.ImaginaryLiteral(\n # value= self.inputs[node.identifier.name].imag))\n # )\n case ast.FloatType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.FloatLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n case ast.BoolType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.BooleanLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n case ast.BitType():\n if isinstance(self.inputs[node.identifier.name], list):\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.ArrayLiteral(\n values=[\n ast.IntegerLiteral(value=s)\n for s in self.inputs[node.identifier.name]\n ]\n ),\n )\n elif isinstance(self.inputs[node.identifier.name], int):\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.IntegerLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n else:\n raise SemanticError(\n ErrorCode.INPUT_TYPE_NOT_SUPPORTED,\n message=f\"Input type not supported: {node.type}\",\n )\n case ast.UintType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.IntegerLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n case _:\n raise SemanticError(\n ErrorCode.INPUT_TYPE_NOT_SUPPORTED,\n message=f\"Input type not supported: {node.type}\",\n )\n # case ast.ArrayType():\n # return ast.ConstantDeclaration(\n # type=node.type,\n # identifier=node.identifier,\n # init_expression=ast.ArrayLiteral(\n # values = [ast.IntegerLiteral(value=s)\n # for s in self.inputs[node.identifier.name]]),\n # )\n\n # todo: AQC-312 add support for angle input type\n # case ast.AngleType():\n # # return ast.ConstantDeclaration(\n # # type=node.type,\n # # identifier=node.identifier,\n # # init_expression=ast.FloatLiteral(\n # # value = self.inputs[node.identifier.name]),\n # # )\n # todo: AQC-310 add support for stretch input type\n # case ast.StretchType():\n else:\n raise SemanticError(\n ErrorCode.OUTPUT_NOT_SUPPORTED,\n message=f\"Output type not supported: {node}\",\n )" }, { "identifier": "SemanticAnalyzer", "path": 
"shipyard/passes/semantic_analysis/semantic_analyzer.py", "snippet": "class SemanticAnalyzer(TypeVisitor, LiteralVisitor, GenericVisitor):\n \"\"\"\n QASMVisitor class that peforms semantic analysis on a openQASM Abstract Syntax Tree\n\n usage:\n qasm_ast = openpulse.parse(qasm_program_string)\n sa = SemanticAnalyser()\n sa.visit(qasm_ast)\n \"\"\"\n\n def __init__(self) -> None:\n self.current_scope: ScopedSymbolTable = None\n self._calibration_scope: CalScopedSymbolTable = None\n self._scope_context: ScopeContext = None\n super().__init__()\n\n @property\n def calibration_scope(self) -> CalScopedSymbolTable:\n \"\"\"Getter for the 'calibration_scope' symbol table of a SemanticAnalyser\n instance. Creates and returns an initialised calibration scope on first call.\n Subsequent calls return the same scope.\n\n Returns:\n CalScopedSymbolTable: a scoped symbol table used for symbols declared within\n openpulse syntax (cal & defcal)\n \"\"\"\n if self._calibration_scope is None:\n self.ensure_in_global_scope(ast.Identifier(\"init cal scope\"))\n self._calibration_scope = CalScopedSymbolTable(\n \"cal_scope\", enclosing_scope=self.current_scope, init_cal=True\n )\n return self._calibration_scope\n\n @property\n def scope_context(self) -> ScopeContext:\n \"\"\"Getter for the 'scope_context' property of a SemanticAnalyser instance\"\"\"\n return self._scope_context\n\n @scope_context.setter\n def scope_context(self, value: ScopeContext):\n LOGGER.debug(\"SET SCOPE CONTEXT: %s\", value)\n self._scope_context = value\n\n # pylint: disable=C0103\n # disable snake_case naming style\n # these functions are of the form \"visit_{QASMNode class name}\"\n def visit_Program(self, node: ast.Program) -> None:\n \"\"\"\n Program node visitor,\n creates and enters a global symbol table (global scope),\n visits all other statements in the openQASM program.\n\n Args:\n node (ast.Program):\n openQASM program ast node to visit\n \"\"\"\n global_scope = ScopedSymbolTable(\n scope_name=\"global\",\n enclosing_scope=self.current_scope,\n )\n with self.scope_context_manager(global_scope, ScopeContext.GLOBAL):\n for statement in node.statements:\n self.visit(statement)\n\n def visit_ExternDeclaration(self, node: ast.ExternDeclaration) -> None:\n \"\"\"\n ExternDeclaration node visitor,\n inserts a symbol representing the external function declaration\n into current_scope (symbol table)\n\n Args:\n node (ast.ExternDeclaration):\n openQASM external function declaration ast node to visit\n \"\"\"\n extern_name = node.name.name\n params = [\n ClassicalSymbol(\n name=f\"{extern_name}_arg_{i}\", kind=self.visit(argument.type)\n )\n for i, argument in enumerate(node.arguments)\n ]\n return_type = self.visit(node.return_type) if node.return_type else None\n extern_symbol = ExternSymbol(\n name=extern_name, params=params, return_type=return_type\n )\n self.declare_symbol(extern_symbol)\n\n def visit_SubroutineDefinition(self, node: ast.SubroutineDefinition) -> None:\n \"\"\"\n SubroutineDefinition node visitor, subroutines may only be defined in global\n scope.\n inserts a symbol representing the subroutine definition into current_scope,\n creates and enters a symbol table (local scope) to encapsulate\n the subroutie,\n inserts all the parameters of the subroutine function signature into the\n new symbol table,\n visits all statements within the subroutine.\n\n Args:\n node (ast.SubroutineDefinition):\n openQASM subroutine definition ast node to visit\n \"\"\"\n self.ensure_in_global_scope(node.name)\n return_type = 
self.visit(node.return_type) if node.return_type else None\n subroutine_symbol = SubroutineSymbol(\n name=node.name.name, return_type=return_type\n )\n\n self.declare_symbol(subroutine_symbol)\n\n subroutine_scope = ScopedSymbolTable(\n scope_name=node.name.name,\n enclosing_scope=self.current_scope,\n )\n\n with self.scope_context_manager(subroutine_scope, ScopeContext.SUBROUTINE):\n for argument in node.arguments:\n arg_symbol = self.visit(argument)\n subroutine_symbol.params.append(arg_symbol)\n\n for statement in node.body:\n self.visit(statement)\n\n def visit_QuantumGateDefinition(self, node: ast.QuantumGateDefinition) -> None:\n \"\"\"\n QuantumGateDefinition node visitor, quantum gates may only be defined in global\n scope.\n inserts a symbol representing the gate definition into current_scope,\n creates and enters a symbol table (local scope) to encapsulate\n the gate,\n inserts all the parameters and qubits of the gate function signature\n into the new symbol table,\n visits all statements within the gate definition.\n\n Args:\n node (ast.QuantumGateDefinition):\n openQASM quantum gate definition ast node to visit\n \"\"\"\n self.ensure_in_global_scope(node.name)\n gate_symbol = GateSymbol(name=node.name.name)\n\n self.declare_symbol(gate_symbol)\n\n gate_scope = ScopedSymbolTable(\n scope_name=gate_symbol.name,\n enclosing_scope=self.current_scope,\n )\n\n with self.scope_context_manager(gate_scope, ScopeContext.SUBROUTINE):\n for argument in node.arguments:\n arg_symbol = Symbol(name=argument.name)\n self.declare_symbol(arg_symbol)\n gate_symbol.params.append(arg_symbol)\n\n for qubit in node.qubits:\n qubit_symbol = QuantumSymbol(name=qubit.name, kind=\"QUBIT\")\n self.declare_symbol(qubit_symbol)\n gate_symbol.qubits.append(qubit_symbol)\n\n for statement in node.body:\n self.visit(statement)\n\n def visit_ClassicalDeclaration(self, node: ast.ClassicalDeclaration) -> None:\n \"\"\"\n ClassicalDeclaration node visitor\n inserts a symbol representing the classical variable into current_scope\n\n Note:\n Arrays cannot be declared inside the body of a function or gate.\n All arrays must be declared within the global scope of the program.\n https://openqasm.com/language/types.html#arrays\n\n Args:\n node (ast.ClassicalDeclaration):\n openQASM classical declaration ast node to visit\n \"\"\"\n if isinstance(node.type, ast.ArrayType):\n self.ensure_in_global_scope(node.identifier)\n type_symbol = self.visit(node.type)\n LOGGER.debug(\n \"Classical Declaration: name: %s, kind: %s\",\n node.identifier.name,\n type_symbol,\n )\n decl_symbol = ClassicalSymbol(name=node.identifier.name, kind=type_symbol)\n self.declare_symbol(decl_symbol)\n\n def visit_ConstantDeclaration(self, node: ast.ConstantDeclaration) -> None:\n \"\"\"\n ConstantDeclaration node visitor\n inserts a symbol representing the constant into current_scope\n\n Args:\n node (ast.ConstantDeclaration):\n openQASM constant declaration ast node to visit\n \"\"\"\n type_symbol = self.visit(node.type)\n decl_symbol = ConstantSymbol(name=node.identifier.name, kind=type_symbol)\n self.declare_symbol(decl_symbol)\n\n def visit_QubitDeclaration(self, node: ast.QubitDeclaration) -> None:\n \"\"\"\n QubitDeclaration node visitor\n inserts a symbol representing the qubit into current_scope\n\n Note:\n All qubits are global variables.\n Qubits cannot be declared within gates or subroutines.\n https://openqasm.com/language/types.html#quantum-types\n\n Args:\n node (ast.QubitDeclaration):\n openQASM qubit declaration ast node to visit\n 
\"\"\"\n # qubits can only be declared in global scope\n self.ensure_in_global_scope(node.qubit)\n decl_symbol = QuantumSymbol(name=node.qubit.name, kind=\"QUBIT\")\n self.declare_symbol(decl_symbol)\n\n def visit_IODeclaration(self, node: ast.IODeclaration) -> None:\n \"\"\"\n ToDo: may require more / different handling when we start using this\n\n IODeclaration node visitor\n inserts a symbol representing the io into current_scope\n\n input/output modifiers can be used to indicate that variables will be\n supplied to / generated by an openQASM program at runtime\n\n https://openqasm.com/language/directives.html#input-output\n\n Args:\n node (ast.IODeclaration):\n openQASM io declaration ast node to visit\n \"\"\"\n type_symbol = self.visit(node.type)\n decl_symbol = IOSymbol(name=node.identifier.name, kind=type_symbol)\n self.declare_symbol(decl_symbol)\n\n def visit_Identifier(self, node: ast.Identifier):\n \"\"\"\n Identifier node visitor:\n Looks up the name of the identifer within current and enclosing scope,\n raises an ID_NOT_FOUND error if the identifier hasn't been declared\n\n Args:\n node (ast.Identifier):\n openQASM identifier node to visit\n\n Raises:\n SemanticError with ErrorCode.ID_NOT_FOUND\n \"\"\"\n node_symbol = self.current_scope.lookup(node.name)\n if node.name[0] == \"$\":\n pass\n elif node_symbol is None:\n raise self.error(ErrorCode.ID_NOT_FOUND, node.name)\n\n def visit_AliasStatement(self, node: ast.AliasStatement) -> None:\n \"\"\"\n AliastStatement node visitor:\n Creates and declares a symbol for an Alias.\n Then visits the value the alias is assigned\n\n Args:\n node (ast.AliasStatement):\n openQASM alias statment to visit\n \"\"\"\n alias_symbol = AliasSymbol(name=node.target.name)\n self.declare_symbol(alias_symbol)\n self.visit(node.value)\n\n def visit_CalibrationStatement(self, node: ast.CalibrationStatement) -> None:\n \"\"\"\n CalibrationStatement node visitor, (cal {} statements):\n Enters calibration scope and visits all statements in the body of the\n calibration statement.\n\n Args:\n node (ast.CalibrationStatement):\n openQASM calibration statement node to visit\n \"\"\"\n self.ensure_in_global_scope(ast.Identifier(\"Calibration Statement\"))\n with self.scope_context_manager(self.calibration_scope, ScopeContext.DEFCAL):\n for statement in node.body:\n self.visit(statement)\n\n def visit_CalibrationDefinition(self, node: ast.CalibrationDefinition) -> None:\n \"\"\"\n CalibrationDefinition node visitor, (defcal {} statements):\n Gets a mangles name for the calibration definition and uses it\n to create a symbol representing the defcal statement.\n Inserts a symbol representing the defcal statement into calibration scope.\n Creates a new CalScopedSymbolTable and enters it.\n Inserts symbols for all parameters and qubits into the new scope.\n Visits all statements withing the body of the defcal statement\n\n Args:\n node (ast.CalibrationDefinition):\n openQASM calibration definition node to visit\n \"\"\"\n self.ensure_in_global_scope(node.name)\n defcal_name = Mangler(node).signature().mangle()\n return_type = self.visit(node.return_type) if node.return_type else None\n defcal_symbol = DefcalSymbol(name=defcal_name, return_type=return_type)\n with self.scope_context_manager(\n self.calibration_scope, context=ScopeContext.DEFCAL\n ):\n self.declare_symbol(defcal_symbol)\n\n defcal_scope = CalScopedSymbolTable(\n scope_name=defcal_symbol.name,\n enclosing_scope=self.calibration_scope,\n )\n\n with self.scope_context_manager(defcal_scope, 
ScopeContext.DEFCAL):\n for argument in node.arguments:\n arg_symbol = self.visit(argument)\n defcal_symbol.params.append(arg_symbol)\n\n for qubit in node.qubits:\n qubit_symbol = QuantumSymbol(\n name=qubit.name, kind=self.current_scope.lookup(\"QUBIT\").name\n )\n self.declare_symbol(qubit_symbol)\n defcal_symbol.qubits.append(qubit_symbol)\n\n for statement in node.body:\n self.visit(statement)\n\n def visit_QuantumGate(self, node: ast.QuantumGate) -> None:\n \"\"\"\n QuantumGate node visitor, (gate call):\n Gets the mangled name best matching the gate call.\n Looks up the mangled name of the gate within the calibration scope.\n Raises an ID_NOT_FOUND error if the gate hasn't been declared.\n\n Args:\n node (ast.QuantumGate):\n openQASM qauntum gate node to visit\n\n Raises:\n SemanticError with ErrorCode.ID_NOT_FOUND\n \"\"\"\n f_signature = Mangler(node).signature()\n symbols = f_signature.match(self.current_scope.keys())\n if not symbols:\n symbols = f_signature.match(self.calibration_scope.keys())\n if symbols:\n # per https://github.com/openqasm/openqasm/issues/245\n return symbols[-1]\n raise self.error(ErrorCode.ID_NOT_FOUND, node.name)\n\n def visit_ClassicalArgument(self, node: ast.ClassicalArgument) -> ClassicalSymbol:\n \"\"\"\n ClassicalArgument node visitor:\n Creates and inserts a ClassicalSymbol for function arguments (def, defcal)\n into current scope\n\n Args:\n node (ast.ClassicalArgument):\n openQASM classical argument node to visit\n\n Returns:\n ClassicalSymbol: the symbol inserted in to current scope\n \"\"\"\n arg_symbol = ClassicalSymbol(name=node.name.name, kind=self.visit(node.type))\n self.declare_symbol(arg_symbol)\n return arg_symbol\n\n def visit_QuantumArgument(self, node: ast.QuantumArgument) -> QuantumSymbol:\n \"\"\"\n QuantumArgument node visitor:\n Creates and inserts a QuantumSymbol for function arguments (def, defcal)\n into current scope\n\n Args:\n node (ast.QuantumArgument):\n openQASM quantum argument node to visit\n\n Returns:\n QuantumSymbol: the symbol inserted in to current scope\n \"\"\"\n arg_symbol = QuantumSymbol(name=node.name.name, kind=\"QUBIT\")\n self.declare_symbol(arg_symbol)\n return arg_symbol\n\n def visit_ForInLoop(self, node: ast.ForInLoop) -> None:\n \"\"\"\n ForInLoop node visitor:\n Visits the set declaration (what will be looped over)\n Enters a new scope.\n Inserts a symbol representing the loop variable into the new scope\n Visits every statement in the block of the ForInLoop\n\n Args:\n node (ast.ForInLoop):\n openQASM for in loop node to visit\n \"\"\"\n type_symbol = self.visit(node.type)\n loop_symbol = ClassicalSymbol(name=node.identifier.name, kind=type_symbol)\n self.visit(node.set_declaration)\n with self.local_context_manager(\"for_loop_scope\", node.block):\n self.current_scope.insert(loop_symbol)\n\n def visit_BranchingStatement(self, node: ast.BranchingStatement) -> None:\n \"\"\"\n BranchingStatement node visitor (if/else):\n visits the condition node of the if/else statement\n Enters a new scope for the if block and visits every statment within it.\n Leaves the if block scope\n Enters a new scope for the else block and visits every statment within it.\n\n Args:\n node (ast.BranchingStatement):\n openQASM branching (if/else) node to visit\n \"\"\"\n self.visit(node.condition)\n with self.local_context_manager(\"if_scope\", node.if_block):\n pass\n with self.local_context_manager(\"else_scope\", node.else_block):\n pass\n\n def visit_WhileLoop(self, node: ast.WhileLoop) -> None:\n \"\"\"\n WhileLoop node 
visitor:\n visits the condition node of the while statement\n Enters a new scope for the while block and visits every statment within it.\n\n Args:\n node (ast.WhileLoop):\n openQASM while node to visit\n \"\"\"\n self.visit(node.while_condition)\n with self.local_context_manager(\"while_scope\", node.block):\n pass\n\n def visit_Box(self, node: ast.Box) -> None:\n \"\"\"\n Box node visitor:\n visits the duration node of the Box statement\n Enters a new scope for the Box block and visits every statment within it.\n\n Args:\n node (ast.Box):\n openQASM Box node to visit\n \"\"\"\n if node.duration:\n self.visit(node.duration)\n with self.local_context_manager(\"box_scope\", node.body):\n pass\n\n def visit_UnaryExpression(self, node: ast.UnaryExpression):\n \"\"\"\n UnaryExpression node visitor:\n validates the operator of the unary expression node\n visits the expression of the unary expression node\n\n Args:\n node (ast.UnaryExpression):\n openQASM unary expression node to visit\n \"\"\"\n # todo check if unary op is allowed for expression\n assert isinstance(node.op, type(ast.UnaryOperator[\"!\"]))\n self.visit(node.expression)\n\n def visit_BinaryExpression(self, node: ast.BinaryExpression):\n \"\"\"\n BinaryExpression node visitor:\n validates the operator of the binary expression node\n visits each side of the binary expression\n\n Args:\n node (ast.BinaryExpression):\n openQASM binary expression node to visit\n \"\"\"\n # todo check if binary op is allowed between lhs and rhs\n assert isinstance(node.op, type(ast.BinaryOperator[\"+\"]))\n self.visit(node.lhs)\n self.visit(node.rhs)\n\n def visit_FunctionCall(self, node: ast.FunctionCall):\n \"\"\"\n FunctionCall node visitor:\n visits the name (Identifier) node of the function call\n visits all the argument nodes of the function call\n\n Args:\n node (ast.FunctionCall):\n openQASM function call node to visit\n \"\"\"\n self.visit(node.name)\n for argument in node.arguments:\n self.visit(argument)\n\n def visit_Cast(self, node: ast.Cast):\n \"\"\"\n Cast node visitor:\n validates that the type being cast to is a classical type\n # todo should be more narrow, e.g. 
durration can't be cast to\n visits the argument node of the cast node\n\n Args:\n node (ast.Cast):\n openQASM cast node to visit\n \"\"\"\n assert isinstance(node.type, ast.ClassicalType)\n self.visit(node.argument)\n\n def visit_IndexExpression(self, node: ast.IndexExpression):\n \"\"\"\n IndexExpression node visitor:\n visits collection node of an index expression node\n visits index node of an index expression node\n\n Args:\n node (ast.IndexExpression):\n openQASM index expression node to visit\n \"\"\"\n self.visit(node.collection)\n if isinstance(node.index, list):\n for i_node in node.index:\n self.visit(i_node)\n else:\n self.visit(node.index)\n\n def visit_DiscreteSet(self, node: ast.DiscreteSet):\n \"\"\"\n DiscreteSet node visitor:\n visits each node of a DiscreteSet\n\n Args:\n node (ast.DiscreteSet):\n openQASM discreate set node to visit\n \"\"\"\n for expression in node.values:\n self.visit(expression)\n\n def visit_RangeDefinition(self, node: ast.RangeDefinition):\n \"\"\"\n RangeDefinition node visitor:\n visits start, end and step nodes of a RangeDefinition\n\n Args:\n node (ast.RangeDefinition):\n openQASM range definition node to visit\n \"\"\"\n if node.start:\n self.visit(node.start)\n if node.end:\n self.visit(node.end)\n if node.step:\n self.visit(node.step)\n\n def visit_Concatenation(self, node: ast.Concatenation):\n \"\"\"\n Concatenation node visitor:\n visits each side of the concatenation expression\n\n Args:\n node (ast.Concatenation):\n openQASM concatenation node to visit\n \"\"\"\n self.visit(node.lhs)\n self.visit(node.rhs)\n\n def visit_BitstringLiteral(self, node: ast.BitstringLiteral) -> LiteralSymbol:\n \"\"\"\n BitstringLiteral node visitor:\n\n Args:\n node (ast.BitstringLiteral):\n openQASM bitstring literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_BitstringLiteral(node)\n return LiteralSymbol(name=value, kind=\"BITSTRING\")\n\n def visit_IntegerLiteral(self, node: ast.IntegerLiteral) -> LiteralSymbol:\n \"\"\"\n IntegerLiteral node visitor:\n\n Args:\n node (ast.IntegerLiteral):\n openQASM integer literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_IntegerLiteral(node)\n return LiteralSymbol(name=value, kind=\"INT\")\n\n def visit_FloatLiteral(self, node: ast.FloatLiteral) -> LiteralSymbol:\n \"\"\"\n FloatLiteral node visitor:\n\n Args:\n node (ast.FloatLiteral):\n openQASM float literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_FloatLiteral(node)\n return LiteralSymbol(name=value, kind=\"FLOAT\")\n\n def visit_ImaginaryLiteral(self, node: ast.ImaginaryLiteral) -> LiteralSymbol:\n \"\"\"\n ImaginaryLiteral node visitor:\n\n Args:\n node (ast.ImaginaryLiteral):\n openQASM imaginary literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_ImaginaryLiteral(node)\n return LiteralSymbol(name=value, kind=\"IMAGINARY\")\n\n def visit_BooleanLiteral(self, node: ast.BooleanLiteral) -> LiteralSymbol:\n \"\"\"\n BooleanLiteral node visitor:\n\n Args:\n node (ast.BooleanLiteral):\n openQASM boolean literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_BooleanLiteral(node)\n return LiteralSymbol(name=value, kind=\"BOOL\")\n\n def visit_DurationLiteral(self, node: ast.DurationLiteral) -> 
LiteralSymbol:\n \"\"\"\n DurationLiteral node visitor:\n\n Args:\n node (ast.DurationLiteral):\n openQASM duration literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_DurationLiteral(node)\n return LiteralSymbol(name=value, kind=\"DURATION\")\n\n # pylint: disable=C0103\n # (snake_case naming style)\n\n def _visit_type_node(self, node: ast.ClassicalType) -> str:\n \"\"\"\n type node visitor:\n Returns the name of a Type node\n Example:\n node:ast.FloatType -> 'FLOAT'\n\n Args:\n node (ast.ClassicalType): node that is a subclass of ClassicalType\n\n Returns:\n str: name of the node type\n \"\"\"\n name = super()._visit_type_node(node)\n name_in_table = self.current_scope.lookup(name).name\n return name_in_table\n\n def error(self, error_code: ErrorCode, name: str) -> SemanticError:\n \"\"\"\n Method for standardizing error handling of the SemanticAnalyser class.\n Logs current scope and returns a SemanticError object that should be raised\n immediately after this method retuns\n\n Usage:\n raise self.error(...)\n\n Args:\n error_code (ErrorCode):\n Code to identify what issue caused an error to be raised\n name (str):\n An identifer string to identify what caused the error\n\n Returns:\n SemanticError: should be raised immediatly on method return\n \"\"\"\n LOGGER.debug(\"CURRENT SCOPE: %s\", self.current_scope)\n LOGGER.debug(\"CALIBRATION SCOPE: %s\", self._calibration_scope)\n return SemanticError(error_code, message=f\"{error_code.value} -> {name}\")\n\n def declare_symbol(self, symbol: Symbol):\n \"\"\"Method for standardizing symbol declaration.\n Symbols are first looked up (in current scope only)\n before being inserted into current scope (if not already in scope)\n\n Args:\n symbol (Symbol): to insert into current scope\n\n Raises:\n SemanticError: ErrorCode.DUBLICATE_ID\n \"\"\"\n if self.current_scope.lookup(symbol.name, current_scope_only=True):\n raise self.error(ErrorCode.DUPLICATE_ID, symbol.name)\n self.current_scope.insert(symbol)\n\n def ensure_in_global_scope(self, node: ast.Identifier):\n \"\"\"\n Ensures that the current scope_context is global scope\n Used to make sure that declarations such as Subroutines and defcals\n Are only used in the allowed scope (GLOBAL)\n\n Args:\n node (ast.Identifier): Node that is currently being visited\n\n Raises:\n SemanticError: ErrorCode.NOT_IN_GLOBAL_SCOPE\n \"\"\"\n if not self.scope_context == ScopeContext.GLOBAL:\n raise self.error(ErrorCode.NOT_IN_GLOBAL_SCOPE, node.name)\n\n @contextmanager\n def scope_context_manager(\n self,\n symbol_table: ScopedSymbolTable,\n context: ScopeContext,\n ):\n \"\"\"\n Context manager for entering/leaving scopes in specific ScopeContext\n\n Args:\n symbol_table (ScopedSymbolTable): Symbol Table / Scope to enter\n context (ScopeContext): what context the scope is entered in\n \"\"\"\n enclosing_scope = self.current_scope\n enclosing_context = self.scope_context\n self.current_scope = symbol_table\n self.scope_context = context\n try:\n yield\n finally:\n if enclosing_context:\n self.scope_context = enclosing_context\n if enclosing_scope:\n self.current_scope = enclosing_scope\n LOGGER.debug(symbol_table)\n LOGGER.debug(\"LEAVE scope: %s\", symbol_table.scope_name)\n\n @contextmanager\n def local_context_manager(self, name: str, block: list[ast.Statement]):\n \"\"\"\n Context manager for entering/leaving local scopes (if/else, for, while, box)\n What ScopeContext is entered depends on the current ScopeContext.\n If in GLOBAL 
then enter LOCAL\n Else (LOCAL, SUBROUTINE, DEFCAL) then keep context unchanged.\n Once in the new scope nodes in the block of the scope will be visited in order\n\n Args:\n name (str):\n Name of the ScopedSymbolTable to enter\n block (list[ast.Statement]):\n list of openQASM statments nodes, visited in order\n \"\"\"\n scope = ScopedSymbolTable(name, enclosing_scope=self.current_scope)\n context = (\n ScopeContext.LOCAL\n if self.scope_context == ScopeContext.GLOBAL\n else self.scope_context\n )\n\n with self.scope_context_manager(scope, context):\n yield\n for statement in block:\n self.visit(statement)" }, { "identifier": "PulseVisualizer", "path": "shipyard/printers/visualizer/visualize_pulse_sequence.py", "snippet": "class PulseVisualizer(Interpreter):\n def __init__(\n self,\n setup: SetupInternal = None,\n external_functions: dict = None,\n ):\n super().__init__(setup, external_functions)\n self.pulses = {} # dict of pulses for each frame/ port\n self.phases = {} # dict of phases for each frame/ port\n self.frequencies = {} # dict of frequencies for each frame/ port\n self.plot_flag: bool = False\n\n def visit_Program(self, node: ast.Program) -> None:\n activation_record = ActivationRecord(\n name=\"main\", ar_type=ARType.PROGRAM, nesting_level=1\n )\n with self.ar_context_manager(activation_record):\n for statement in node.statements:\n self.visit(statement)\n for frame in self.pulses.keys():\n self.plotter(\n np.concatenate(self.pulses[frame]),\n np.concatenate(self.phases[frame]),\n np.concatenate(self.frequencies[frame]),\n frame,\n )\n\n def plotter(self, wfm_array, phase_array, frequency_array, frame_name):\n fig, axs = plt.subplots(3)\n if all(isinstance(i, complex) for i in wfm_array):\n axs[0].plot([value.real for value in wfm_array], label=\"real\")\n axs[0].plot([value.imag for value in wfm_array], label=\"imag\")\n axs[0].legend()\n else:\n axs[0].plot(wfm_array)\n axs[0].set(ylabel=f\"{frame_name} amplitude\")\n axs[1].plot(phase_array)\n axs[1].set(ylabel=f\"{frame_name} phase\")\n axs[2].plot(frequency_array)\n axs[2].set(ylabel=f\"{frame_name} frequency\")\n if self.plot_flag: # pragma: no cover\n plt.show()\n\n @_maybe_annotated\n def visit_ClassicalDeclaration(self, node: ast.ClassicalDeclaration) -> None:\n \"\"\"\n ClassicalDeclaration node visitor:\n Visits and stores classical declarations of variables. 
If the variable\n declared is a frame, the frame is added to the current activation record,\n as well as the Interpreter's pulse, phase, and frequency dictionaries.\n\n Args:\n node (ast.ClassicalDeclaration): openQASM ClassicalDeclaration AST node\n\n \"\"\"\n activation_record = self.call_stack.peek()\n match node:\n case ast.ClassicalDeclaration(type=ast.PortType()):\n name = node.identifier.name\n activation_record[name] = self.setup.ports[name]\n case ast.ClassicalDeclaration(\n type=ast.FrameType(),\n init_expression=ast.FunctionCall(name=ast.Identifier(\"newframe\")),\n ):\n call = node.init_expression\n assert isinstance(call, ast.FunctionCall)\n assert len(call.arguments) == 3\n port = call.arguments[0].name\n frequency = self.visit(call.arguments[1])\n phase = self.visit(call.arguments[2])\n frame = Frame(\n name=node.identifier.name,\n port=activation_record[port],\n frequency=frequency,\n phase=phase,\n )\n self.pulses[frame.name] = []\n self.phases[frame.name] = []\n self.frequencies[frame.name] = []\n activation_record[frame.name] = frame\n case ast.ClassicalDeclaration(type=ast.ArrayType()):\n if node.init_expression is None:\n shapes = [dim.value for dim in node.type.dimensions]\n activation_record[node.identifier.name] = np.zeros(shape=shapes)\n else:\n activation_record[node.identifier.name] = self.visit(\n node.init_expression\n )\n case _:\n if node.init_expression is not None:\n activation_record[node.identifier.name] = self.visit(\n node.init_expression\n )\n else:\n activation_record[node.identifier.name] = None\n\n @_maybe_annotated\n def visit_DelayInstruction(self, node: ast.DelayInstruction) -> None:\n \"\"\"\n DelayInstruction node visitor:\n Appends delay of 0s to relevant frame\n\n Args:\n node (ast.DelayInstruction): openQASM DelayInstruction AST node\n \"\"\"\n for q in node.qubits:\n if q.name in self.pulses.keys():\n self.pulses[q.name].append(np.zeros(int(self.visit(node.duration))))\n self.phases[q.name].append(\n np.full(\n int(self.visit(node.duration)),\n self.call_stack.down_stack(q.name)[q.name].phase,\n )\n )\n self.frequencies[q.name].append(\n np.full(\n int(self.visit(node.duration)),\n self.call_stack.down_stack(q.name)[q.name].frequency,\n )\n )\n\n def visit_play(self, node: ast.FunctionCall) -> None:\n \"\"\"\n FunctionCall node visitor. Handles 'play' and 'capture' function calls.\n For 'play', 'capture_v1', and 'capture_v2' function calls, the function\n call is visited and the resulting waveform is appended to the relevant\n frame's pulse, phase, and frequency arrays. 
For 'capture_v3' and\n 'capture_v1' function calls, the function call is visited and the resulting\n time value is returned and turned into an array of 1s of that length, and\n appeneded to the relevant frame's pulse, phase, and frequency arrays.\n\n Args:\n node (ast.FunctionCall): 'play' FunctionCall node to visit\n\n Raises:\n Error:\n ErrorCode.UNHANDLED\n If the node does not match the expected format/structure\n \"\"\"\n match node:\n case ast.FunctionCall(\n name=ast.Identifier(\"play\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ) | ast.FunctionCall(\n name=ast.Identifier(\"capture_v1\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ) | ast.FunctionCall(\n name=ast.Identifier(\"capture_v2\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ):\n wfm_array = self.visit(wfm_node)\n self.phases[frame_name].append(\n np.full(\n len(wfm_array),\n self.call_stack.down_stack(frame_name)[frame_name].phase,\n )\n )\n self.pulses[frame_name].append(wfm_array)\n self.frequencies[frame_name].append(\n np.full(\n len(wfm_array),\n self.call_stack.down_stack(frame_name)[frame_name].frequency,\n )\n )\n case ast.FunctionCall(\n name=ast.Identifier(\"capture_v3\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ) | ast.FunctionCall(\n name=ast.Identifier(\"capture_v1_spectrum\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ):\n val = self.visit(wfm_node)\n self.phases[frame_name].append(\n np.full(\n len(wfm_array),\n self.call_stack.down_stack(frame_name)[frame_name].phase,\n )\n )\n self.pulses[frame_name].append(np.ones(int(val)))\n self.frequencies[frame_name].append(\n np.full(\n len(wfm_array),\n self.call_stack.down_stack(frame_name)[frame_name].frequency,\n )\n )\n\n case _:\n raise Error(\n ErrorCode.UNHANDLED,\n f\"Unhandled waveform generation: {node}\",\n )" }, { "identifier": "waveform_functions", "path": "shipyard/printers/zi/waveform_functions.py", "snippet": "def zeros(samples: int) -> np.ndarray:\ndef placeholder(samples: int) -> np.ndarray:\ndef ones(samples: int) -> np.ndarray:\ndef sine(\n samples: int,\n amplitue: float,\n phase_offset: float,\n n_periods: int,\n) -> np.ndarray:\ndef cosine(\n samples: int,\n amplitue: float,\n phase_offset: float,\n n_periods: int,\n) -> np.ndarray:\ndef sinc(samples: int, amplitude: float, position: int, beta: float) -> np.ndarray:\ndef ramp(samples: int, start_level: float, end_level: float) -> np.ndarray:\ndef sawtooth(\n samples: int, amplitude: float, phase_offset: float, n_periods: int\n) -> np.ndarray:\ndef triangle(\n samples: int, amplitude: float, phase_offset: float, n_periods: int\n) -> np.ndarray:\ndef gauss(samples: int, amplitude: float, position: int, width: float) -> np.ndarray:\ndef drag(samples: int, amplitude: float, position: int, width: float) -> np.ndarray:\ndef blackman(samples: int, amplitude: float, alpha: float) -> np.ndarray:\ndef hamming(samples: int, amplitude: float) -> np.ndarray:\ndef hann(samples: int, amplitude: float) -> np.ndarray:\ndef rect(samples: int, amplitude: float) -> np.ndarray:\ndef chirp(\n samples: int,\n amplitude: float,\n start_freq: float,\n stop_freq: float,\n phase: float = 0.0,\n) -> np.ndarray:\ndef rrc(\n samples: int, amplitude: float, position: int, beta: float, width: float\n) -> np.ndarray:\n def _special_value():" }, { "identifier": "Frame", "path": "shipyard/setup/internal.py", "snippet": "class Frame(BaseModel):\n \"\"\"\n Representation of the openQASM openpulse frame concept as a pydantic model.\n 
https://openqasm.com/language/openpulse.html#frames\n\n Args:\n name (str):\n name of the frame.\n port (Port):\n the Port object the frame is associated with.\n frequency (float):\n the frequency the frame evolves at. Defaults to 0.\n phase (float):\n the phase of the frame.\n time (Duration):\n the time of the frame.\n \"\"\"\n\n name: str\n port: Port\n frequency: float = 0.0\n phase: float = 0.0\n time: Duration = Duration(time=0)\n\n def set_phase(self, phase: float):\n \"\"\"Sets the phase of the frame\n\n Args:\n phase (float): the value the phase will be set to\n \"\"\"\n self.phase = phase\n\n def shift_phase(self, phase: float):\n \"\"\"Shifts the phase of the frame\n\n Args:\n phase (float): the value the phase will be shifted by.\n \"\"\"\n self.phase += phase\n\n def get_phase(self) -> float:\n \"\"\"Gets the phase of the frame\n\n Returns:\n float: current value of the phase of the frame.\n \"\"\"\n return self.phase\n\n def set_frequency(self, frequency: float):\n \"\"\"Sets the frequency of the frame\n\n Args:\n frequency (float): the value the frequency will be set to.\n \"\"\"\n self.frequency = frequency\n\n def shift_frequency(self, frequency: float):\n \"\"\"Shifts the frequency of the frame\n\n Args:\n frequency (float): the value the frequency will be shifted by.\n \"\"\"\n self.frequency += frequency\n\n def get_frequency(self) -> float:\n \"\"\"Gets the frequency of the frame\n\n Returns:\n float: current value of the frequency of the frame.\n \"\"\"\n return self.frequency\n\n def advance(self, duration: Duration):\n \"\"\"Advances the time of the frame by some duration\n\n Args:\n duration (Duration): the duration to advance the time of the frame by.\n \"\"\"\n self.time += duration\n\n def advance_to(self, duration: Duration):\n \"\"\"Advances the time of the frame to some other time\n\n Args:\n duration (Duration): the duratioin to advance the time fo the frame to.\n\n Raises:\n ValueError:\n If the time the frame should be advanced to is less than the\n current time of the frame.\n \"\"\"\n duration.set_unit(self.time.unit)\n if self.time > duration:\n raise ValueError(f\"Cant advance current time {self.time} to {duration}\")\n self.time.time = int(duration.time * duration.unit.value / self.time.unit.value)" }, { "identifier": "Instrument", "path": "shipyard/setup/internal.py", "snippet": "class Instrument(BaseModel):\n \"\"\"\n Minimal information required to identify an Instrument\n\n Args:\n name (str):\n name of instrument instance, used to easily identify one intrument from\n another.\n type (InstrumentType):\n Literal representing the type/model of the instrument.\n serial (str):\n Serial number of the instrument in string format.\n \"\"\"\n\n name: str\n type: InstrumentType\n serial: str" }, { "identifier": "Port", "path": "shipyard/setup/internal.py", "snippet": "class Port(BaseModel):\n \"\"\"\n Representation of the openQASM openpulse port concept as a pydantic model.\n https://openqasm.com/language/openpulse.html#ports\n\n Args:\n name (str):\n name of the port.\n instrument (Instrument):\n What instrument the port is associated with.\n core (Core):\n Settings for the AWG Core the port is associated with.\n \"\"\"\n\n class Core(BaseModel):\n \"\"\"\n Settings for a AWG core\n\n Args:\n type (CoreType):\n the Type of AWG Core this 'Core' object is\n index (int):\n the index of the AWG Core on the Instrument this 'Core' object belongs.\n channels (list[int]):\n the channels of the AWG Core this 'Core' object belongs\n \"\"\"\n\n type: CoreType\n 
index: int\n channels: list[int]\n\n # pylint: disable=R0903\n # too-few-public-methods\n class Config:\n \"\"\"Pydantic model config for Core\"\"\"\n\n frozen = True\n\n # pylint: enable=R0903\n\n def obj(self) -> AWGCore:\n \"\"\"\n Returns an AWGCore subclass of type matching the type of the pydantic core\n model.\n\n Returns:\n AWGCore: AWGCore subclass of type matching the model instance.\n \"\"\"\n return CORE_TYPE_TO_CLASS[self.type]\n\n @validator(\"channels\")\n def not_more_channels_than_core_type_allows(cls, channels: list[int], values):\n \"\"\"\n Validates that the number of channels for the Core object does\n not exceed the number of channels allowed by the CoreType\n \"\"\"\n assert channels\n assert \"type\" in values\n assert len(channels) <= CORE_TYPE_TO_CLASS[values[\"type\"]].n_channels\n return channels\n\n name: str\n instrument: Instrument\n core: Core\n\n # pylint: disable=R0903\n # too-few-public-methods\n class Config:\n \"\"\"Pydantic model config for Port\"\"\"\n\n frozen = True\n\n # pylint: enable=R0903" }, { "identifier": "SetupInternal", "path": "shipyard/setup/internal.py", "snippet": "class SetupInternal(BaseModel):\n\n \"\"\"\n A Pydantic model containing the information required to compile an openQASM program\n to instrument level instructions.\n\n It is recommended to instanciate this object from a configuration file\n (json (future yml?))\n \"\"\"\n\n # todo validation\n\n # todo move to own module\n instruments: dict[str, Instrument]\n ports: dict[str, Port]\n frames: dict[str, Frame]\n\n @classmethod\n def from_dict(cls, setup: dict[str, dict[str, dict]]) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a dictionary\n\n Args:\n setup (dict[str, dict[str, dict]]): dictionary to create a Setup object from\n\n Returns:\n Setup: created from dictionary\n \"\"\"\n instruments = {\n k: Instrument(name=k, **v) for k, v in setup[\"Instruments\"].items()\n }\n ports = {}\n for k, val in setup[\"Ports\"].items():\n val[\"instrument\"] = instruments[val[\"instrument\"]]\n val[\"core\"] = Port.Core(**val[\"core\"])\n ports[k] = Port(name=k, **val)\n frames = {}\n for k, val in setup[\"Frames\"].items():\n val[\"port\"] = ports[val[\"port\"]]\n frames[k] = Frame(name=k, **val)\n return cls(instruments=instruments, ports=ports, frames=frames)\n\n def to_dict(self) -> dict[str, dict[str, dict]]:\n \"\"\"Creates a dictionary from a Setup object\n\n Args:\n filename (Path | str, optional):\n path to save dictionary to. 
Defaults to None.\n\n Returns:\n dict[str, dict[str, dict]]: dictionary created from Setup object\n \"\"\"\n setup = {\n \"Instruments\": {\n k: {\n \"type\": v.type,\n \"serial\": v.serial,\n }\n for k, v in self.instruments.items()\n },\n \"Ports\": {\n k: {\n \"instrument\": v.instrument.name,\n \"core\": {\n \"type\": v.core.type.value,\n \"index\": v.core.index,\n \"channels\": v.core.channels,\n },\n }\n for k, v in self.ports.items()\n },\n \"Frames\": {\n k: {\n \"port\": v.port.name,\n \"frequency\": v.frequency,\n \"phase\": v.phase,\n }\n for k, v in self.frames.items()\n },\n }\n return setup\n\n @classmethod\n def from_json(cls, filename: str | Path) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a json file\n\n Args:\n filename (str | Path): path to json file\n\n Returns:\n Setup: created from json file\n \"\"\"\n with open(filename, encoding=\"utf-8\") as file:\n data = json.load(file)\n return cls.from_dict(data)\n\n def to_json(self, filename: str | Path) -> Path:\n \"\"\"Writes a Setup object to a json file\n\n Args:\n filename (str | Path): path to json file to create\n\n Returns:\n Path: path to json file\n \"\"\"\n data = self.to_dict()\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n json.dump(data, file, indent=4)\n return Path(filename)\n\n @classmethod\n def from_yml(cls, filename: str | Path) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a yml file\n\n Args:\n filename (str | Path): path to yml file\n\n Returns:\n Setup: created from yml file\n \"\"\"\n with open(filename, \"r\", encoding=\"utf-8\") as file:\n data = yaml.safe_load(file)\n return cls.from_dict(data)\n\n def to_yml(self, filename: str | Path) -> Path:\n \"\"\"Writes a Setup object to a yml file\n\n Args:\n filename (str | Path): path to yml file to create\n\n Returns:\n Path: path to yml file\n \"\"\"\n data = self.to_dict()\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n yaml.dump(data, file)\n return Path(filename)\n\n def cores(self) -> set[tuple[str, int, str]]:\n \"\"\"Gets all the AWG Cores used in the setup\n\n Returns:\n set[tuple[str, int, str]]:\n a Set of tuples, each tuple has a string representing the instruement\n name, a integer representing the index of the awg core of the\n instrument and a string representing the type of the awg core.\n \"\"\"\n return set(\n (port.instrument.name, port.core.index, port.core.type.value)\n for port in self.ports.values()\n )" } ]
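The SetupInternal snippet above converts between the pydantic model and a plain nested dictionary (from_dict/to_dict, plus the json and yml wrappers). As a minimal sketch, the dictionary it round-trips has the shape below; only the key layout is taken from the snippet, while every concrete value (instrument type, serial, core type, index, channels, frequency, phase) is an assumed placeholder.

# Sketch of the nested dict accepted by SetupInternal.from_dict and produced by
# to_dict. Key layout follows the snippet above; all values are placeholders.
example_setup = {
    "Instruments": {
        "awg0": {"type": "<InstrumentType literal>", "serial": "DEV0000"},
    },
    "Ports": {
        "ch1": {
            "instrument": "awg0",  # resolved by name to the Instrument defined above
            "core": {"type": "<CoreType value>", "index": 1, "channels": [1]},
        },
    },
    "Frames": {
        "drive_frame": {"port": "ch1", "frequency": 5.0e9, "phase": 0.0},
    },
}
# setup = SetupInternal.from_dict(example_setup)  # needs real enum values to validate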
import codecs import json import numpy as np import pytest from pathlib import Path from shipyard.awg_core.awg_core import CoreType from shipyard.call_stack import ActivationRecord, ARType from shipyard.compiler import Compiler from shipyard.duration import Duration, TimeUnits from shipyard.passes.duration_transformer import DurationTransformer from shipyard.passes.resolve_io_declaration import ResolveIODeclaration from shipyard.passes.semantic_analysis.semantic_analyzer import SemanticAnalyzer from shipyard.printers.visualizer.visualize_pulse_sequence import PulseVisualizer from shipyard.printers.zi import waveform_functions from shipyard.setup.internal import Frame, Instrument, Port, SetupInternal
17,493
final_call_stack = { "nested_subroutines": {"dummy": 16}, "complex_arrays": { "dummy": 4, "two_d": [[1, 2], [3, 4], [5, 6]], "my_arr": [complex(1, 0), complex(0, 1), complex(0.8, 0.6)], "second": [1, 2, 3, 4], }, } def files() -> list[str]: base_path = Path(__file__).parent.parent.parent / "qasm/visualize_pulse" plen = len(base_path.parts) FILES = list(base_path.glob("**/*.qasm")) return [str(Path(*path.parts[plen:])) for path in FILES] QASM_FILES = files() def common_files() -> list[str]: files = [] cut = -5 for q_file in QASM_FILES: files.append(q_file[:cut]) return files COMMON_FILES = common_files() @pytest.fixture(name="basic_setup") def fixture_basic_setup() -> SetupInternal: json_path = Path(__file__).parent.parent.parent / "setups/interpreter.json" return SetupInternal.from_json(json_path) def test_visit_ClassicalDeclaration(): setup_path = Path(__file__).parent.parent.parent / "setups/complex.json" qasm_path = Path(__file__).parent.parent.parent / "qasm/interpreter/phase_freq.qasm" compiler = Compiler(qasm_path, setup_path) qasm_ast = compiler.load_program(qasm_path) ResolveIODeclaration().visit(qasm_ast) SemanticAnalyzer().visit(qasm_ast) DurationTransformer().visit(qasm_ast) pv = PulseVisualizer( SetupInternal.from_json(setup_path), waveform_functions.__dict__ ) activation_record = ActivationRecord(
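In the cropped test above, PulseVisualizer receives waveform_functions.__dict__ as its second constructor argument. Read against the PulseVisualizer and waveform_functions snippets in the context list, that argument looks like a plain name-to-callable mapping of waveform generators; the sketch below builds such a mapping explicitly. This is an assumed interpretation, not code from the repository, and the setup path is illustrative.

# Assumed interpretation: external_functions maps waveform names to callables,
# so passing the module __dict__ simply exposes every module-level function.
from pathlib import Path

from shipyard.printers.visualizer.visualize_pulse_sequence import PulseVisualizer
from shipyard.printers.zi import waveform_functions
from shipyard.setup.internal import SetupInternal

external_functions = {
    "zeros": waveform_functions.zeros,
    "ones": waveform_functions.ones,
    "gauss": waveform_functions.gauss,
    "sine": waveform_functions.sine,
}

setup_path = Path("setups/complex.json")  # illustrative path, adjust to your layout
pv = PulseVisualizer(SetupInternal.from_json(setup_path), external_functions)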
final_call_stack = { "nested_subroutines": {"dummy": 16}, "complex_arrays": { "dummy": 4, "two_d": [[1, 2], [3, 4], [5, 6]], "my_arr": [complex(1, 0), complex(0, 1), complex(0.8, 0.6)], "second": [1, 2, 3, 4], }, } def files() -> list[str]: base_path = Path(__file__).parent.parent.parent / "qasm/visualize_pulse" plen = len(base_path.parts) FILES = list(base_path.glob("**/*.qasm")) return [str(Path(*path.parts[plen:])) for path in FILES] QASM_FILES = files() def common_files() -> list[str]: files = [] cut = -5 for q_file in QASM_FILES: files.append(q_file[:cut]) return files COMMON_FILES = common_files() @pytest.fixture(name="basic_setup") def fixture_basic_setup() -> SetupInternal: json_path = Path(__file__).parent.parent.parent / "setups/interpreter.json" return SetupInternal.from_json(json_path) def test_visit_ClassicalDeclaration(): setup_path = Path(__file__).parent.parent.parent / "setups/complex.json" qasm_path = Path(__file__).parent.parent.parent / "qasm/interpreter/phase_freq.qasm" compiler = Compiler(qasm_path, setup_path) qasm_ast = compiler.load_program(qasm_path) ResolveIODeclaration().visit(qasm_ast) SemanticAnalyzer().visit(qasm_ast) DurationTransformer().visit(qasm_ast) pv = PulseVisualizer( SetupInternal.from_json(setup_path), waveform_functions.__dict__ ) activation_record = ActivationRecord(
name="main", ar_type=ARType.PROGRAM, nesting_level=1
2
2023-11-16 17:37:29+00:00
24k
quantuminterface/qiclib
src/qiclib/code/qi_jobs.py
[ { "identifier": "TaskRunner", "path": "src/qiclib/hardware/taskrunner.py", "snippet": "class TaskRunner(PlatformComponent):\n \"\"\"Driver to control the Taskrunner on the Hardware Platform.\"\"\"\n\n def __init__(\n self,\n name: str,\n connection,\n controller,\n qkit_instrument=True,\n ):\n super().__init__(name, connection, controller, qkit_instrument)\n self._stub = grpc_stub.TaskRunnerServiceStub(self._conn.channel)\n\n @property\n @platform_attribute\n @ServiceHubCall(\n errormsg=\"Could not fetch the current firmware hash of the Taskrunner\"\n )\n def firmware_hash(self):\n \"\"\"The hash of the current firmware running on the realtime core.\"\"\"\n return self._stub.GetStatus(proto.Empty()).firmware_hash\n\n @property\n @platform_attribute\n @ServiceHubCall(\n errormsg=\"Could not determine the build date of the Taskrunner firmware\"\n )\n def firmware_build_date(self):\n \"\"\"Returns the build date of the Taskrunner firmware.\"\"\"\n return self._stub.GetStatus(proto.Empty()).build_date\n\n @property\n @platform_attribute\n @ServiceHubCall(\n errormsg=\"Could not determine the build commit of the Taskrunner firmware\"\n )\n def firmware_build_commit(self):\n \"\"\"Returns the build commit hash of the Taskrunner firmware.\"\"\"\n return self._stub.GetStatus(proto.Empty()).build_commit\n\n @property\n @platform_attribute\n @ServiceHubCall(errormsg=\"Could not determine the status of the taskrunner\")\n def loaded_task(self):\n \"\"\"The name of the currently loaded task.\"\"\"\n return self._stub.GetStatus(proto.Empty()).task_name\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine the progress of the task\")\n def task_progress(self):\n \"\"\"Returns the progress of the task\"\"\"\n return self._stub.GetStatus(proto.Empty()).task_progress\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine number of available databoxes\")\n def databoxes_available(self):\n \"\"\"Returns the number of available databoxes.\"\"\"\n return self._stub.GetStatus(proto.Empty()).databoxes_available\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine state of the taskrunner\")\n def busy(self):\n \"\"\"Returns if the taskrunner is currently busy.\"\"\"\n return self._stub.GetTaskState(proto.Empty()).busy\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine if task has finished\")\n def task_done(self):\n \"\"\"Returns if the task has finished.\"\"\"\n return self._stub.GetTaskState(proto.Empty()).done\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine if task has error messages\")\n def task_errormsg_available(self):\n \"\"\"Returns if task has error messages.\"\"\"\n return self._stub.GetTaskState(proto.Empty()).error_msg_available\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine if error message queue is full\")\n def task_errormsg_queue_full(self):\n \"\"\"Returns if if error message queue is full.\"\"\"\n return self._stub.GetTaskState(proto.Empty()).error_msg_queue_full\n\n @ServiceHubCall(errormsg=\"Failed to start task\")\n def start_task(self, loop=False, overwrite=False):\n \"\"\"Starts the execution of a previously loaded task.\n\n :param loop: bool, optional\n if the task should be executed in a loop, by default False\n :param overwrite: bool, optional\n if a current running task should be stopped, by default False\n \"\"\"\n self._stub.StartTask(\n proto.StartTaskRequest(looping=loop, stop_running=overwrite)\n )\n\n @ServiceHubCall(errormsg=\"Failed to stop task\")\n def stop_task(self):\n \"\"\"Stops the execution of 
running task.\"\"\"\n self._stub.StopTask(proto.StopTaskRequest())\n\n @ServiceHubCall(errormsg=\"Failed to reset task\")\n def reset_task(self):\n \"\"\"Resets (unloads) a loaded task.\"\"\"\n self._stub.StopTask(proto.StopTaskRequest(reset=True))\n\n @ServiceHubCall(errormsg=\"Failed to load task binary\")\n def load_task_binary(self, filename, taskname):\n \"\"\"Loads a task binary into the taskrunner.\n The *taskname* needs to match the name of the task to load\n in order to verify that it is indeed the desired task file.\n\n :param filename: str\n name of the file with the task\n :param taskname: str\n name of the task\n\n :raises ValueError:\n if the path of the file is not found\n \"\"\"\n if not os.path.exists(filename):\n raise ValueError(\"File not found!\")\n\n with open(filename, \"rb\") as f:\n binary = f.read()\n self._stub.ProgramTask(proto.ProgramTaskRequest(name=taskname, task=binary))\n\n @ServiceHubCall(errormsg=\"Failed to compile and load task binary\")\n def load_task_source(self, filename, taskname):\n \"\"\"Loads a task source file `filename` into the taskrunner.\n `taskname` can be freely chosen to later identify the task on the platform.\n\n :param filename:\n name of the file with the task\n :param taskname:\n name of the task\n \"\"\"\n if os.path.isfile(filename):\n # File name can be full path to a file\n filepath = filename\n else:\n # or just the file name -> pick from task repository\n filepath = get_task_source(filename)\n\n with open(filepath, \"rb\") as f:\n binary = f.read()\n\n self._stub.CompileTask(proto.ProgramTaskRequest(name=taskname, task=binary))\n\n @ServiceHubCall(errormsg=\"Failed to set parameters\")\n def set_param_list(self, param_list):\n \"\"\"Sets the parameters for the task. param_list has to be an array of 32bit values.\"\"\"\n self._stub.SetParameter(proto.ParameterRequest(parameters=param_list))\n\n class DataMode(Enum):\n INT8 = 1\n UINT8 = 2\n INT16 = 3\n UINT16 = 4\n INT32 = 5\n UINT32 = 6\n INT64 = 7\n UINT64 = 8\n\n @ServiceHubCall(errormsg=\"Failed to fetch databoxes from taskrunner\")\n def get_databoxes_with_mode(\n self, mode=DataMode.INT32, require_done=True\n ) -> List[List[Any]]:\n \"\"\"Retrieves data from a previously started task on the R5.\n Depending on the parameter mode, the data is interpreted differently.\n\n :param mode:\n DataMode of the databoxes, by default DataMode.INT32\n :param require_done:\n if the task has to be finished before fetching data, by default True\n\n :return:\n A list of databoxes, being list of values themselves, either int32 or uint32.\n\n :raises Exception:\n If require_done is True and the Task is not finished\n :raises ValueError:\n If the data mode is not known\n :raises Exception:\n If require_done and not data is available\n \"\"\"\n self.check_task_errors()\n\n if require_done and not self.task_done:\n raise RuntimeError(\"Task should be finished prior to fetching data.\")\n\n method_call = {\n TaskRunner.DataMode.INT8: self._stub.GetDataboxesINT8,\n TaskRunner.DataMode.UINT8: self._stub.GetDataboxesUINT8,\n TaskRunner.DataMode.INT16: self._stub.GetDataboxesINT16,\n TaskRunner.DataMode.UINT16: self._stub.GetDataboxesUINT16,\n TaskRunner.DataMode.INT32: self._stub.GetDataboxesINT32,\n TaskRunner.DataMode.UINT32: self._stub.GetDataboxesUINT32,\n TaskRunner.DataMode.INT64: self._stub.GetDataboxesINT64,\n TaskRunner.DataMode.UINT64: self._stub.GetDataboxesUINT64,\n }.get(mode, None)\n if method_call is None:\n raise ValueError(\"Data mode is unknown! 
Only use DataMode Enum values.\")\n\n databoxes: List[List[Any]] = []\n last_index = -1\n for databox_reply in method_call(proto.Empty()):\n # print databox_reply.index, databox_reply.data[:]\n if last_index != databox_reply.index:\n # Create new (empty) databox in list\n databoxes.append([])\n last_index = databox_reply.index\n # Fill the latest databox with content\n databoxes[-1].extend(databox_reply.data[:])\n\n if require_done and not databoxes:\n raise RuntimeError(\n \"No data available to fetch. Are you sure the task completed successfully?\"\n )\n\n return databoxes\n\n def get_databoxes(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 32bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT32, require_done)\n\n def get_databoxes_INT8(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 8bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT8, require_done)\n\n def get_databoxes_UINT8(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 8bit unsigned integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.UINT8, require_done)\n\n def get_databoxes_INT16(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 16bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT16, require_done)\n\n def get_databoxes_UINT16(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 16bit unsigned integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.UINT16, require_done)\n\n def get_databoxes_INT32(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 32bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT32, require_done)\n\n def get_databoxes_UINT32(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 32bit unsigned integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.UINT32, require_done)\n\n def get_databoxes_INT64(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 64bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT64, require_done)\n\n def get_databoxes_UINT64(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 64bit unsigned integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.UINT64, require_done)\n\n @ServiceHubCall\n def get_error_messages(self):\n \"\"\"Retrieves all error messages from the task\"\"\"\n reply = self._stub.GetTaskErrorMessages(proto.Empty())\n return reply.message[:]\n\n def check_task_errors(self):\n errors = self.get_error_messages()\n if errors:\n raise RuntimeError(\n \"The following error messages were retrieved \"\n 
+ \"from the Taskrunner:\\n{}\".format(\"\\n\".join(errors))\n )\n\n # DEPRECATED STUFF\n @property\n def data_size(self):\n \"\"\"TODO Replace by progress in all experiments.\"\"\"\n raise DeprecationWarning(\n \"data_size is not supported anymore! Use task_progress instead!\"\n )" }, { "identifier": "DataProvider", "path": "src/qiclib/experiment/qicode/data_provider.py", "snippet": "class DataProvider(ABC):\n \"\"\"\n Provides uniform access to experiment result data.\n\n Result data is received either from the taskrunner plugin or the unit cell plugin and comes in different formats.\n This class encapsulates the format differences, to allow for further processing of the data to be handled\n independently.\n \"\"\"\n\n @classmethod\n def create(cls, result, use_taskrunner: bool):\n if use_taskrunner:\n return _TaskrunnerDataProvider(result)\n return _InternalPluginDataProvider(result)\n\n def __init__(self, result):\n self._result = result\n\n @abstractmethod\n def get_raw_i(self, cell_index: int):\n pass\n\n @abstractmethod\n def get_raw_q(self, cell_index: int):\n pass\n\n def get_default_i(self, cell_index: int, index: int):\n return self.get_raw_i(cell_index)[index]\n\n def get_default_q(self, cell_index: int, index: int):\n return self.get_raw_q(cell_index)[index]\n\n def get_amp_pha_i(self, cell_index: int, index: int):\n return self.get_default_i(cell_index, index)\n\n def get_amp_pha_q(self, cell_index: int, index: int):\n return self.get_default_q(cell_index, index)\n\n @abstractmethod\n def get_iq_cloud_i(self, cell_index: int, index: int, recording_count: int):\n pass\n\n @abstractmethod\n def get_iq_cloud_q(self, cell_index: int, index: int, recording_count: int):\n pass\n\n def get_states(self, cell_index: int):\n return self._result[cell_index]\n\n def get_counts(self):\n return self.get_states(0)" }, { "identifier": "DataHandler", "path": "src/qiclib/experiment/qicode/data_handler.py", "snippet": "class DataHandler(ABC):\n \"\"\"\n Each subclass of this one handles a different way to process result data, depending on the type of experiment run.\n This usually includes splitting it up for the different boxes.\n It takes a list of cells and the recording data provider and processes it however it sees fit.\n In order to find out the box in which to store a recording it can access the `_result_recording_order` of a cell\n which provides the correct QiResult for the n-th executed recording.\n For examples, see the subclasses.\n\n :param data_provider: to access the experiments results\n :param cell_list: to store processed results there\n \"\"\"\n\n @staticmethod\n def _data_handler_factories() -> (\n Dict[str, Callable[[DataProvider, List[\"QiCell\"], int], \"DataHandler\"]]\n ):\n \"\"\"\n This is a method instead of a static variable, because forward references to the subclasses are not possible in\n static variable assignments.\n \"\"\"\n return {\n \"average\": lambda data_provider, cell_list, averages: _DefaultDataHandler(\n data_provider, cell_list\n ),\n \"amp_pha\": lambda data_provider, cell_list, averages: _AmplitudePhaseDataHandler(\n data_provider, cell_list\n ),\n \"iqcloud\": lambda data_provider, cell_list, averages: _IQCloudDataHandler(\n data_provider, cell_list\n ),\n \"raw\": lambda data_provider, cell_list, averages: _RawDataHandler(\n data_provider, cell_list\n ),\n \"states\": _StateDataHandler,\n \"counts\": lambda data_provider, cell_list, averages: _CountDataHandler(\n data_provider, cell_list\n ),\n \"quantum_jumps\": lambda data_provider, cell_list, 
averages: _QuantumJumpsDataHandler(\n data_provider, cell_list\n ),\n \"custom\": lambda data_provider, cell_list, averages: _NotImplementedDataHandler(\n data_provider, cell_list\n ),\n }\n\n @staticmethod\n def names():\n return DataHandler._data_handler_factories().keys()\n\n @classmethod\n def get_factory_by_name(\n cls, name: str\n ) -> Optional[Callable[[DataProvider, List[\"QiCell\"], int], \"DataHandler\"]]:\n factories = DataHandler._data_handler_factories()\n if name not in factories:\n return None\n return factories[name]\n\n @classmethod\n def get_custom_wrapper_factory(\n cls, custom_data_handler: Callable[[List[\"QiCell\"], DataProvider], None]\n ) -> Callable[[DataProvider, List[\"QiCell\"], int], \"DataHandler\"]:\n return lambda data_provider, cell_list, averages: _CustomDataHandlerWrapper(\n data_provider, cell_list, custom_data_handler\n )\n\n def __init__(self, data_provider: DataProvider, cell_list: List[\"QiCell\"]):\n self.data_provider = data_provider\n self.cell_list = cell_list\n\n @abstractmethod\n def process_results(self):\n pass" }, { "identifier": "SequencerInstruction", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SequencerInstruction:\n OPCODE_WIDTH = 7\n FUNCT3_WIDTH = 3\n FUNCT7_WIDTH = 7\n REGISTER_WIDTH = 5\n LOWER_IMMEDIATE_WIDTH = 12\n UPPER_IMMEDIATE_WIDTH = 20\n\n LOWER_IMM_MAX = (\n 2 ** (LOWER_IMMEDIATE_WIDTH - 1)\n ) - 1 # Lower immediate 12 Bits - 1Bit Signed\n LOWER_IMM_MIN = -(2 ** (LOWER_IMMEDIATE_WIDTH - 1))\n\n UPPER_IMM_MAX = (\n 2 ** (UPPER_IMMEDIATE_WIDTH - 1)\n ) - 1 # Upper immediate 20 Bits - 1Bit Signed\n UPPER_IMM_MIN = -(2 ** (UPPER_IMMEDIATE_WIDTH - 1))\n UPPER_IMM_MAX_UNSIGNED = 2**UPPER_IMMEDIATE_WIDTH\n\n imm_type = Union[int] # might include float in the future\n\n def __init__(self, OpCode: SeqOpCode) -> None:\n self.op = OpCode\n\n @staticmethod\n def is_value_in_lower_immediate(val: imm_type) -> bool:\n return (\n SequencerInstruction.LOWER_IMM_MIN\n <= val\n <= SequencerInstruction.LOWER_IMM_MAX\n )\n\n @staticmethod\n def is_value_in_unsigned_upper_immediate(val: imm_type) -> bool:\n return SequencerInstruction.UPPER_IMM_MAX_UNSIGNED >= abs(val)\n\n @abstractmethod\n def get_riscv_instruction(self) -> int:\n pass" }, { "identifier": "_QiVariableBase", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class _QiVariableBase(QiExpression):\n \"\"\"Base class for QiVariables.\n Variables can be relevant to only a subset of QiCells, this subset is saved in _relevant_cells.\n Variables are simple expressions and, therefore, are typed.\n Variables can be compared by self.id.\"\"\"\n\n id_iter = itertools.count()\n str_id_iter = itertools.count()\n\n def __init__(\n self,\n type: QiType,\n value: Optional[Union[int, float]] = None,\n name=None,\n ):\n from .qi_jobs import QiCell\n\n assert isinstance(type, QiType)\n assert value is None or isinstance(value, (int, float))\n\n super().__init__()\n\n if type != QiType.UNKNOWN:\n self._type_info.set_type(type, _TypeDefiningUse.VARIABLE_DEFINITION)\n\n self.value = value\n\n self._value = value\n self._relevant_cells: Set[QiCell] = set()\n self.id = next(_QiVariableBase.id_iter)\n self.str_id = next(_QiVariableBase.str_id_iter)\n\n self._contained_variables.add(self)\n\n self.name = name\n\n @property\n def contained_variables(self):\n return self._contained_variables\n\n @staticmethod\n def reset_str_id():\n _QiVariableBase.str_id_iter = itertools.count()\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_variable(self)\n\n def 
_equal_syntax(self, other: \"QiExpression\") -> bool:\n return isinstance(other, _QiVariableBase) and self.id == other.id\n\n def __hash__(self) -> int:\n return self.id\n\n def __str__(self) -> str:\n return f\"QiVariable({self.name or ''})\"" }, { "identifier": "_QiCalcBase", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class _QiCalcBase(QiExpression):\n \"\"\"Represents binary and unary operations.\"\"\"\n\n def __init__(self, val1, op, val2) -> None:\n super().__init__()\n\n self.val1 = val1\n self.op: QiOp = op\n self.val2 = val2\n\n from .qi_types import add_qi_calc_constraints\n\n add_qi_calc_constraints(op, val1, val2, self)\n\n @property\n def contained_variables(self):\n \"\"\"Function traverses the operation tree to determine which QiVariables are used for the calculations.\n Found QiVariables are added to _contained_variables\"\"\"\n if len(self._contained_variables) == 0:\n self._variables_to_container()\n\n return self._contained_variables\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_calc(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return (\n isinstance(other, _QiCalcBase)\n and self.op == other.op\n and self.val1._equal_syntax(other.val1)\n and self.val2._equal_syntax(other.val2)\n )\n\n def __str__(self):\n return (\n \"(\"\n + self.val1.__str__()\n + \" \"\n + self.op.value\n + \" \"\n + self.val2.__str__()\n + \")\"\n )" }, { "identifier": "_QiConstValue", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class _QiConstValue(QiExpression):\n \"\"\"Represents QiExpression which are a constant (compiletime known) values.\n Integers can be used as either NORMAL, TIME or FREQUENCY values. It is up to the type inference to figure it out.\n If the value can be represented as a float value it has an additional attribute float_value which represents the value before\n it has been converted to the integer representation used by the sequencer.\n \"\"\"\n\n def __init__(self, value: Union[int, float]):\n super().__init__()\n\n self._given_value = value # Value given to the constructor. 
Is interpreted differently depending on the type.\n\n # Constant STATE values can only be 0 or 1, therefore we forbid QiType.STATE if we have a different value.\n if isinstance(self._given_value, float) or self._given_value not in [1, 0]:\n self._type_info.add_illegal_type(\n QiType.STATE, _IllegalTypeReason.INVALID_STATE_CONSTANT\n )\n\n if isinstance(self._given_value, float):\n self._type_info.add_illegal_type(\n QiType.NORMAL, _IllegalTypeReason.INVALID_NORMAL_CONSTANT\n )\n\n @property\n def float_value(self):\n assert self.type in (QiType.TIME or self.type, QiType.FREQUENCY)\n return self._given_value\n\n @property\n def value(self):\n \"\"\"\n Integer representation of the constant value.\n Since the sequencer doesn't have a floating point unit, any calculations has to be using integers.\n In practice, this means we only perform fixpoint arithmetic and need to convert any float like value\n to such an fixpoint value.\n The correct conversion depends on the type.\n \"\"\"\n if self.type in (QiType.NORMAL, QiType.STATE, QiType.UNKNOWN):\n return self._given_value\n elif self.type == QiType.TIME:\n return int(util.conv_time_to_cycles(self._given_value, \"ceil\"))\n else:\n assert self.type == QiType.FREQUENCY\n return util.conv_freq_to_nco_phase_inc(self._given_value)\n\n @property\n def contained_variables(self):\n return QiVariableSet()\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_constant(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n assert QiType.UNKNOWN not in (self.type, other.type)\n return isinstance(other, _QiConstValue) and self.value == other.value\n\n def __str__(self):\n if self.type in (QiType.TIME, QiType.FREQUENCY):\n value = self.float_value\n elif self.type in (QiType.NORMAL, QiType.STATE, QiType.UNKNOWN):\n value = self.value\n else:\n raise RuntimeError(\n \"This program point should be unreacheable. Please file a bug report.\"\n )\n return f\"{value:g}\"" }, { "identifier": "QiCellProperty", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiCellProperty(QiExpression):\n \"\"\"When describing experiments, properties of cells might not yet be defined. 
Instead a QiCellProperty object will be generated.\n This object can be used as length definition in cQiWait commands and QiPulse\"\"\"\n\n def __init__(self, cell, name):\n super().__init__()\n from .qi_jobs import QiCell\n\n self.name: str = name\n self.cell: QiCell = cell\n self.operations = lambda val: val\n self.opcode = \"x\"\n\n @property\n def opcode_p(self):\n \"\"\"Old opcode in parantheses for building new opcode\"\"\"\n return self.opcode if self.opcode == \"x\" else f\"({self.opcode})\"\n\n def resolve_equal(self, o: object) -> bool:\n if isinstance(o, QiCellProperty):\n return self.name == o.name and self.opcode == o.opcode\n elif o is None:\n return False\n try:\n return o == self()\n except KeyError:\n return False # At time of comparison, unresolved property is not equal to o\n\n def __call__(self):\n value = self.cell._properties.get(self.name)\n\n if isinstance(value, QiCellProperty) or value is None:\n raise KeyError(\"Property could not be resolved\")\n return self.operations(value)\n\n @property\n def value(self):\n if self.type == QiType.TIME:\n return util.conv_time_to_cycles(self())\n elif self.type == QiType.FREQUENCY:\n return util.conv_freq_to_nco_phase_inc(self())\n elif self.type == QiType.NORMAL:\n return self()\n elif self.type == QiType.STATE:\n return self()\n else:\n raise RuntimeError(\n \"Mising type information to resolve value to convert to a machine value.\"\n )\n\n @property\n def float_value(self):\n assert self.type in (QiType.TIME, QiType.FREQUENCY)\n return self()\n\n @abstractmethod\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_cell_property(self)\n\n @property\n def contained_variables(self):\n return QiVariableSet()\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return isinstance(other, QiCellProperty) and self.resolve_equal(other)\n\n def move_add_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations # Necessary because of recursion otherwise\n self.operations = lambda val: old_op(val) + x.value\n self.opcode = f\"{self.opcode_p} + {x}\"\n return self\n\n def move_radd_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations\n self.operations = lambda val: x.value + old_op(val)\n self.opcode = f\"{self.opcode_p} + {x}\"\n return self\n\n def move_sub_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) - x.value\n self.opcode = f\"{self.opcode_p} - {x}\"\n return self\n\n def move_rsub_op_to_property(self, x: _QiConstValue):\n old_op = self.operations\n self.operations = lambda val: x.value - old_op(val)\n self.opcode = f\"{x} - {self.opcode_p}\"\n return self\n\n def move_mul_op_to_property(self, x: _QiConstValue):\n if x._given_value == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) * x.value\n self.opcode = f\"{x} * {self.opcode_p}\"\n return self\n\n def move_rmul_op_to_property(self, x: _QiConstValue):\n if x._given_value == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: x.value * old_op(val)\n self.opcode = f\"{x} * {self.opcode_p}\"\n return self\n\n # These operations are not implemented for general QiExpressions\n # and are, therefore, left as they are.\n\n def __truediv__(self, x):\n if (isinstance(x, _QiConstValue) and x._given_value == 1) or x == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: 
old_op(val) / x\n self.opcode = f\"{self.opcode_p} / {x}\"\n return self\n\n def __rtruediv__(self, x):\n old_op = self.operations\n self.operations = lambda val: x / old_op(val)\n self.opcode = f\"{x} / {self.opcode_p}\"\n return self" }, { "identifier": "QiExpression", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiExpression:\n \"\"\"Superclass of every possible qicode expression.\"\"\"\n\n def __init__(self):\n self._contained_variables = QiVariableSet()\n self._type_info = _TypeInformation(self)\n\n @property\n def type(self):\n return self._type_info.type\n\n @staticmethod\n def _from(x):\n \"\"\"Creates an instance of QiExpression of the provided argument if possible.\"\"\"\n if isinstance(x, (float, int)):\n return _QiConstValue(x)\n elif isinstance(x, QiExpression):\n return x\n else:\n raise RuntimeError(f\"Can not create QiExpression from type {type(x)}.\")\n\n @abstractmethod\n def accept(self, visitor: QiExpressionVisitor):\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `accept`. This is a bug.\"\n )\n\n @property\n def contained_variables(self):\n \"\"\"Returns the variables used in this expression.\n QiExpression subclasses which contain variables (_QiCalcBase and _QiVariableBase) need to overwrite this.\n \"\"\"\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `contained_variables`. This is a bug.\"\n )\n\n def _variables_to_container(self):\n if isinstance(self, _QiVariableBase):\n self._contained_variables.add(self)\n elif isinstance(self, _QiCalcBase):\n self._contained_variables.update(self.val1.contained_variables)\n self._contained_variables.update(self.val2.contained_variables)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `_equal_syntax`. 
This is a bug.\"\n )\n\n # QiCellProperties are supposed to support some form of constant folding.\n # However, originally, instead of implementing this in an extra pass over\n # QiJob they were added to the QiCellProperty class.\n # In order to keep support for this limited form of constant folding\n # This logic was placed here.\n\n # (I'm not sure why we don't fold when both operands are QiCellProperty.\n # And I think the reason we don't fold tow _QiConstValue is that originally\n # They were just int/float and would \"fold\" implicitely when using any\n # math operator on them)\n\n # If anyone ever feels the need to improve this situation, I would\n # encourage them to implement a constant folding pass using the existing\n # dataflow infrastructure.\n # This pdf seems to give a nice short introduction into the topic:\n # http://openclassroom.stanford.edu/MainFolder/courses/Compilers/docs/slides/15-02-constant-propagation-annotated.pdf\n\n def __add__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_add_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_radd_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.PLUS, x)\n\n def __radd__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_radd_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_add_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.PLUS, self)\n\n def __sub__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_sub_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_rsub_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.MINUS, x)\n\n def __rsub__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_rsub_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_sub_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.MINUS, self)\n\n def __mul__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_mul_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_rmul_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.MULT, x)\n\n def __rmul__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_rmul_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_mul_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.MULT, self)\n\n def __lshift__(self, x):\n return _QiCalcBase(self, QiOp.LSH, QiExpression._from(x))\n\n def __rshift__(self, x):\n return _QiCalcBase(self, QiOp.RSH, QiExpression._from(x))\n\n def __and__(self, x):\n return _QiCalcBase(self, QiOp.AND, QiExpression._from(x))\n\n def __rand__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.AND, self)\n\n def __or__(self, x):\n return _QiCalcBase(self, QiOp.OR, QiExpression._from(x))\n\n def __ror__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.OR, self)\n\n def __xor__(self, x):\n return _QiCalcBase(self, QiOp.XOR, 
QiExpression._from(x))\n\n def __rxor__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.XOR, self)\n\n def __invert__(self):\n return _QiCalcBase(self, QiOp.NOT, None)\n\n def __lt__(self, x):\n return QiCondition(self, QiOpCond.LT, QiExpression._from(x))\n\n def __le__(self, x):\n return QiCondition(self, QiOpCond.LE, QiExpression._from(x))\n\n def __gt__(self, x):\n return QiCondition(self, QiOpCond.GT, QiExpression._from(x))\n\n def __ge__(self, x):\n return QiCondition(self, QiOpCond.GE, QiExpression._from(x))\n\n def __eq__(self, x):\n return QiCondition(self, QiOpCond.EQ, QiExpression._from(x))\n\n def __ne__(self, x):\n return QiCondition(self, QiOpCond.NE, QiExpression._from(x))" }, { "identifier": "QiVariableSet", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiVariableSet:\n \"\"\"Class provides Set functionality for QiVariables.\n QiVariables overwrite comparison operations to build operation trees, to still allow comparisons ids are used.\n \"\"\"\n\n def __init__(self) -> None:\n self._var_list: List[\"_QiVariableBase\"] = []\n self._var_id_list: List[int] = []\n\n def __contains__(self, x):\n return x.id in self._var_id_list\n\n def add(self, x: \"_QiVariableBase\"):\n if x.id not in self._var_id_list:\n self._var_id_list.append(x.id)\n self._var_list.append(x)\n\n def update(self, var_set):\n for var in var_set:\n self.add(var)\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self._var_list):\n var = self._var_list[self.n]\n self.n += 1\n return var\n else:\n raise StopIteration\n\n def __len__(self):\n return len(self._var_list)" }, { "identifier": "QiCondition", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiCondition:\n \"\"\"Saves conditional comparisons.\n Can only be root node\"\"\"\n\n def __init__(\n self,\n val1: QiExpression,\n op: QiOpCond = QiOpCond.GT,\n val2: QiExpression = _QiConstValue(0),\n ) -> None:\n self._contained_variables = QiVariableSet()\n\n self.val1 = val1\n self.op = op\n self.val2 = val2\n\n from .qi_types import add_qi_condition_constraints\n\n add_qi_condition_constraints(op, val1, val2)\n\n @property\n def contained_variables(self):\n if len(self._contained_variables) == 0:\n self._contained_variables.update(self.val1.contained_variables)\n self._contained_variables.update(self.val2.contained_variables)\n\n return self._contained_variables\n\n def accept(self, visitor):\n visitor.visit_condition(self)\n\n def __str__(self) -> str:\n return f\"{self.val1} {self.op.value} {self.val2}\"" }, { "identifier": "QiPulse", "path": "src/qiclib/code/qi_pulse.py", "snippet": "class QiPulse:\n \"\"\"\n Class to describe a single pulse.\n\n :param length: length of the pulse. This can also be a QiVariable for variable pulse lengths.\n :param shape: pulse shape (i.e. rect, gauss, ...)\n :param amplitude: relative amplitude of your pulse. This can also be a QiVariable for variable pulse amplitudes. NOT IMPLEMENTED\n :param phase: phase of the pulse in deg. (i.e. 
90 for pulse around y-axis of the bloch sphere)\n :param frequency: Frequency of your pulse, which is loaded to the PulseGen\n \"\"\"\n\n Type = Union[float, _QiVariableBase]\n\n def __init__(\n self,\n length: Union[float, _QiVariableBase, str],\n shape: Shape = ShapeLib.rect,\n amplitude: Union[float, _QiVariableBase] = 1.0,\n phase: float = 0.0,\n frequency: Union[float, QiExpression, None] = None,\n hold=False,\n ):\n from .qi_jobs import QiCellProperty\n\n if isinstance(length, str):\n mode = length.lower()\n if not mode in [\"cw\", \"off\"]:\n raise ValueError(\"QiPulse with str length only accepts 'cw' or 'off'.\")\n length = util.conv_cycles_to_time(1)\n if mode == \"cw\":\n hold = True\n else:\n amplitude = 0\n else:\n mode = \"normal\"\n\n self.mode = mode\n self.shape = shape\n self.amplitude = amplitude\n self.phase = phase\n self._length = length\n self.frequency = (\n QiExpression._from(frequency) if frequency is not None else None\n )\n self.hold = hold\n self.shift_phase = False\n\n if self.frequency is not None:\n self.frequency._type_info.set_type(\n QiType.FREQUENCY, _TypeDefiningUse.PULSE_FREQUENCY\n )\n\n self.var_dict = {}\n\n if isinstance(length, QiExpression):\n length._type_info.set_type(QiType.TIME, _TypeDefiningUse.PULSE_LENGTH)\n\n if isinstance(length, _QiVariableBase):\n self.var_dict[\"length\"] = length\n if shape != ShapeLib.rect:\n raise NotImplementedError(\n \"Variable pulse lengths are only supported for rectangular pulses\"\n )\n elif isinstance(length, QiCellProperty):\n pass\n elif util.conv_time_to_cycles(length) >= 2**32:\n raise RuntimeError(\n f\"Pulse length exceeds possible wait time, cycles {util.conv_time_to_cycles(length)}\"\n )\n\n if isinstance(amplitude, _QiVariableBase):\n raise NotImplementedError(\"Variable Amplitude not implemented yet\")\n # self.var_dict[\"amplitude\"] = amplitude\n\n def _are_variable_length(self, other) -> bool:\n return self.is_variable_length and other.is_variable_length\n\n def _are_same_length(self, other) -> bool:\n return (\n not isinstance(self._length, _QiVariableBase)\n and not isinstance(other._length, _QiVariableBase)\n and (self._length is other._length)\n )\n\n def _are_same_amplitude(self, other) -> bool:\n return (\n not isinstance(self.amplitude, _QiVariableBase)\n and not isinstance(other.amplitude, _QiVariableBase)\n and (self.amplitude == other.amplitude)\n )\n\n def __eq__(self, o: object) -> bool:\n equal_length: bool = isinstance(o, QiPulse) and (\n self._are_variable_length(o) or self._are_same_length(o)\n )\n equal_amplitude: bool = isinstance(o, QiPulse) and self._are_same_amplitude(o)\n\n return (\n isinstance(o, QiPulse)\n and equal_length\n and equal_amplitude\n and (self.hold == o.hold)\n and (self.shape == o.shape)\n and (self.phase == o.phase)\n and (\n self.frequency._equal_syntax(o.frequency)\n if self.frequency is not None and o.frequency is not None\n else self.frequency is o.frequency\n )\n )\n\n def __call__(self, samplerate: float, **variables: Any) -> np.ndarray:\n \"\"\"\n Returns the pulse envelope for a given frequency.\n :param samplerate: sample rate for calculating the envelope\n :param variables: the variables for the length/amplitude function, if any; legacy of qup_pulses\n\n :return: envelope of the pulse as numpy array.\n \"\"\"\n from .qi_jobs import QiCellProperty\n\n if self.is_variable_length:\n # variable pulses are hold till ended by another pulse, so no need to use correct length\n return np.array([self.amplitude] * 4)\n\n length = (\n self._length() if 
isinstance(self._length, QiCellProperty) else self._length\n )\n\n if (\n util.conv_time_to_cycles(length) >= 2**32\n ): # check value again, QiCellproperty might be used\n raise RuntimeError(\n f\"Pulse length exceeds possible wait time, cycles {util.conv_time_to_cycles(length)}\"\n )\n\n amplitude = self.amplitude\n timestep = 1.0 / samplerate\n\n if length < timestep / 2.0:\n if length != 0:\n logging.warning(\n \"A pulse is shorter than %f ns and thus is omitted.\", length * 1e09\n )\n\n return np.zeros(0)\n\n time_fractions = np.arange(0, length, timestep) / length\n envelope = amplitude * self.shape(time_fractions)\n\n return envelope\n\n @property\n def length(self):\n return self.var_dict.get(\"length\", self._length)\n\n @property\n def variables(self):\n return list(self.var_dict.values())\n\n @property\n def is_variable_length(self):\n return isinstance(self._length, _QiVariableBase)\n\n def _stringify_args(self) -> str:\n \"\"\"Determines non-default args to explicitly stringify\"\"\"\n arg_strings = []\n defaults = self.__init__.__defaults__\n\n if self.mode == \"normal\":\n arg_strings.append(str(self.length))\n else:\n arg_strings.append(f'\"{self.mode}\"')\n\n if self.shape != defaults[0]:\n arg_strings.append(f\"shape={self.shape}\")\n if not _equal(self.amplitude, defaults[1]) and self.mode != \"off\":\n arg_strings.append(f\"amplitude={self.amplitude}\")\n if not _equal(self.phase, defaults[2]):\n arg_strings.append(f\"phase={self.phase}\")\n if not _equal(self.frequency, defaults[3]):\n arg_strings.append(f\"frequency={self.frequency}\")\n\n return \", \".join(arg_strings)\n\n def _stringify(self) -> str:\n return f\"QiPulse({self._stringify_args()})\"" }, { "identifier": "QiCMContainedCellVisitor", "path": "src/qiclib/code/qi_visitor.py", "snippet": "class QiCMContainedCellVisitor(QiCommandVisitor):\n \"\"\"Visitor to check which cells are used inside context managers.\"\"\"\n\n def __init__(self) -> None:\n self.contained_cells: Set[QiCell] = set()\n\n def visit_cell_command(self, cell_cmd):\n self.contained_cells.update(cell_cmd._relevant_cells)\n\n def visit_context_manager(self, context_manager):\n visitor = QiCMContainedCellVisitor()\n for item in context_manager.body:\n item.accept(visitor)\n\n context_manager._relevant_cells.update(visitor.contained_cells)\n\n self.contained_cells.update(visitor.contained_cells)\n\n def visit_if(self, if_cm):\n visitor = QiCMContainedCellVisitor()\n for command in if_cm.body:\n command.accept(visitor)\n\n for command in if_cm._else_body:\n command.accept(visitor)\n\n if_cm._relevant_cells.update(visitor.contained_cells)\n\n self.contained_cells.update(visitor.contained_cells)\n\n def visit_parallel(self, parallel_cm):\n visitor = QiCMContainedCellVisitor()\n for cmd_list in parallel_cm.entries:\n for cmd in cmd_list:\n cmd.accept(visitor)\n\n parallel_cm._relevant_cells.update(visitor.contained_cells)\n\n self.contained_cells.update(visitor.contained_cells)\n\n def visit_variable_command(self, variable_cmd):\n self.contained_cells.update(variable_cmd._relevant_cells)\n\n def visit_sync_command(self, sync_cmd):\n self.contained_cells.update(sync_cmd._relevant_cells)\n\n def visit_asm_command(self, asm_cmd):\n self.contained_cells.update(asm_cmd._relevant_cells)\n\n def visit_mem_store_command(self, store_cmd):\n self.contained_cells.update(store_cmd._relevant_cells)" }, { "identifier": "QiResultCollector", "path": "src/qiclib/code/qi_visitor.py", "snippet": "class QiResultCollector(QiCommandVisitor):\n def __init__(self):\n # If 
there are multiple QiResults used, we need to\n # simulate in which order they record.\n self.found_qi_results = set()\n # We also collect the recordings which contain the qi_results above\n self.corresponding_recordings = set()\n\n # Is a recording which saves to a QiResult within an if.\n # In these cases we can not necessarily simulate the recording order.\n self.recording_in_if = False\n\n self.if_else_depth = 0\n\n def visit_cell_command(self, cell_cmd):\n from .qi_jobs import cQiRecording, cQiPlayReadout\n\n if isinstance(cell_cmd, cQiPlayReadout) and cell_cmd.recording is not None:\n cell_cmd = cell_cmd.recording\n\n if isinstance(cell_cmd, cQiRecording):\n if self.if_else_depth > 0:\n self.recording_in_if = True\n\n self.found_qi_results.add(cell_cmd.save_to)\n self.corresponding_recordings.add(cell_cmd)\n\n def visit_if(self, if_cm):\n self.if_else_depth += 1\n\n for cmd in if_cm.body:\n cmd.accept(self)\n\n for cmd in if_cm.body:\n cmd.accept(self)\n\n self.if_else_depth -= 1\n\n def visit_parallel(self, parallel_cm):\n for cmd in parallel_cm.body:\n cmd.accept(self)\n\n def visit_for_range(self, for_range_cm):\n for cmd in for_range_cm.body:\n cmd.accept(self)" }, { "identifier": "QiVarInForRange", "path": "src/qiclib/code/qi_visitor.py", "snippet": "class QiVarInForRange(QiCommandVisitor):\n \"\"\"Visitor used to visit QiCommands inside ForRange-Contextmanager. Raises error, if variable used in ForRange-Head is target of an Assign or Store\n command inside ForRange-Body. Additionally generates UserWarning when loop-variable is used inside Parallel-CM.\n \"\"\"\n\n def __init__(self, var) -> None:\n self.var = var\n\n def raise_exception(self):\n raise RuntimeError(\n \"Variable used in ForRange must not be used in internal Assign-Commands, var: \"\n + str(self.var)\n )\n\n def visit_cell_command(self, cell_cmd):\n from .qi_jobs import cQiStore\n\n if isinstance(cell_cmd, cQiStore):\n if id(cell_cmd.store_var) == id(self.var):\n self.raise_exception()\n\n def visit_context_manager(self, context_manager):\n for item in context_manager.body:\n item.accept(self)\n\n def visit_if(self, if_cm):\n for command in if_cm.body:\n command.accept(self)\n\n for command in if_cm._else_body:\n command.accept(self)\n\n def visit_parallel(self, parallel_cm):\n if self.var in parallel_cm._associated_variable_set:\n raise RuntimeError(\n \"Loop variable inside Parallel Context Manager might result in unexpected behaviour. 
\"\n \"Please unroll loop or change variable.\"\n )\n\n def visit_variable_command(self, variable_cmd):\n pass\n\n def visit_assign_command(self, assign_cmd):\n if id(assign_cmd.var) == id(self.var):\n self.raise_exception()\n\n def visit_sync_command(self, sync_cmd):\n pass" }, { "identifier": "QiProgramBuilder", "path": "src/qiclib/code/qi_prog_builder.py", "snippet": "class QiProgramBuilder:\n def __init__(\n self,\n cell_list: List[Any],\n cell_map: List[Any],\n command_list: List[Any],\n skip_nco_sync: bool = False,\n nco_sync_length: float = 0,\n ) -> None:\n from .qi_sequencer import Sequencer\n\n self.cell_seq_dict: Dict[Any, Sequencer] = {}\n self.result_boxes = []\n\n for cell, index in zip(cell_list, cell_map):\n self.cell_seq_dict[cell] = Sequencer(cell_index=index)\n\n for resultbox in cell._result_container.values():\n self.result_boxes.append(resultbox)\n\n self.cell_map = cell_map\n\n self.command_list = command_list\n\n self.skip_nco = skip_nco_sync\n self.nco_length = nco_sync_length\n\n @staticmethod\n def assign_cell_to_context_manager(commands: List[Any]):\n contained_cells_visitor = QiCMContainedCellVisitor()\n for command in commands:\n command.accept(contained_cells_visitor)\n\n @staticmethod\n def assign_variables_to_cell(commands: List[Any]):\n cell_to_variable_visitor = QiCmdVariableInspection()\n for command in reversed(commands):\n command.accept(cell_to_variable_visitor)\n\n QiProgramBuilder.assign_cell_to_context_manager(\n commands\n ) # run again, to ensure all Assignment statements are considered as well\n\n def build_program(self):\n for cell, sequencer in self.cell_seq_dict.items():\n cell.reset()\n\n if self.skip_nco is False:\n sequencer.add_nco_sync(self.nco_length)\n\n self.assign_cell_to_context_manager(self.command_list)\n\n self.assign_variables_to_cell(self.command_list)\n\n prog_builder = ProgramBuilderVisitor(self.cell_seq_dict, self.cell_map)\n\n for command in self.command_list:\n command.accept(prog_builder)\n\n for sequencer in self.cell_seq_dict.values():\n sequencer.end_of_program()\n\n return self.cell_seq_dict\n\n def get_all_variables(self) -> Dict[Any, Dict[Any, int]]:\n vars: Dict[Any, Dict[Any, int]] = {}\n for cell, seq in self.cell_seq_dict.items():\n for var in cell._relevant_vars:\n if var not in vars:\n vars[var] = {}\n vars[var][cell] = seq.get_var_register(var).adr\n return vars" }, { "identifier": "QiType", "path": "src/qiclib/code/qi_types.py", "snippet": "class QiType(Enum):\n \"\"\"The type that a :class:`~qiclib.code.qi_var_definitions.QiExpression` has.\"\"\"\n\n UNKNOWN = 0\n TIME = 1\n \"\"\"Time values contain some amount of times (in cycles) that, for example, can be used in wait commands.\n They are specified using float (seconds) and are converted to cycles automatically.\n \"\"\"\n STATE = 2\n \"\"\"State values are the result of a recording.\"\"\"\n NORMAL = 3\n \"\"\"Freely usable integer values.\"\"\"\n FREQUENCY = 4\n \"\"\"\n Frequency values can be used in the Play/PlayReadout commands and, like TIME, are specified using floats.\n \"\"\"" }, { "identifier": "QiPostTypecheckVisitor", "path": "src/qiclib/code/qi_types.py", "snippet": "class QiPostTypecheckVisitor(QiJobVisitor):\n \"\"\"Checks that every variable has an assigned type.\n The start and end values of ForRanges over time values are converted to cycles, because we only know with\n certainty whether they iterate over NORMAL or TIME values after the QiTypeFallbackVisitor has run.\n \"\"\"\n\n def __init__(self):\n pass\n\n def visit_for_range(self, 
for_range_cm):\n from qiclib.packages.constants import CONTROLLER_CYCLE_TIME\n from .qi_var_definitions import _QiConstValue, QiType\n from .qi_jobs import ForRange\n import numpy as np\n\n for_range_cm: ForRange = for_range_cm\n\n for_range_cm.var.accept(self)\n for_range_cm.start.accept(self)\n for_range_cm.end.accept(self)\n\n super().visit_for_range(for_range_cm)\n\n if for_range_cm.var.type == QiType.TIME:\n if isinstance(for_range_cm.start, _QiConstValue):\n if for_range_cm.start.value < 0:\n raise RuntimeError(\n f\"ForRange with negative time value ({for_range_cm.start._given_value}) are not allowed\"\n )\n\n if for_range_cm.end.value == 0:\n warnings.warn(\"End value of 0 will not be included in ForRange.\")\n\n # round to 11 decimals, if result is CONTROLLER_CYCLE_TIME then float modulo probably failed\n if (\n round(np.mod(for_range_cm.step._given_value, CONTROLLER_CYCLE_TIME), 11)\n != 0\n and round(\n np.mod(for_range_cm.step._given_value, CONTROLLER_CYCLE_TIME), 11\n )\n != CONTROLLER_CYCLE_TIME\n ):\n raise RuntimeError(\n f\"When using QiTimeVariables define step size as multiple of {CONTROLLER_CYCLE_TIME*1e9:.3g} ns.\"\n f\" (It is currently off by {np.mod(for_range_cm.step._given_value, CONTROLLER_CYCLE_TIME)*1e9:.3g} ns.)\"\n )\n elif (\n for_range_cm.var.type == QiType.FREQUENCY\n and isinstance(for_range_cm.end, _QiConstValue)\n and for_range_cm.end.value == 0\n ):\n warnings.warn(\"End value of 0 will not be included in ForRange.\")\n\n def visit_assign_command(self, assign_cmd):\n assign_cmd.var.accept(self)\n super().visit_assign_command(assign_cmd)\n\n def visit_constant(self, const):\n from .qi_var_definitions import QiType\n\n if const.type == QiType.UNKNOWN:\n raise TypeError(f\"Could not infer type of {const}.\")\n\n def visit_variable(self, var):\n from .qi_var_definitions import QiType\n\n if var.type == QiType.UNKNOWN:\n raise TypeError(f\"Could not infer type of {var}.\")\n\n def visit_calc(self, calc):\n from .qi_var_definitions import QiType\n\n super().visit_calc(calc)\n if calc.type == QiType.UNKNOWN:\n raise TypeError(f\"Could not infer type of {calc}.\")\n\n def visit_cell_property(self, cell_prop):\n if cell_prop.type == QiType.UNKNOWN:\n raise TypeError(f\"Could not infer type of {cell_prop}\")" }, { "identifier": "QiTypeFallbackVisitor", "path": "src/qiclib/code/qi_types.py", "snippet": "class QiTypeFallbackVisitor(QiJobVisitor):\n \"\"\"Sets the the fallback type to NORMAL for _QiConstValue if they weren't given a type during QiJob construction.\n This is important for qicode like the following:\n\n .. code-block:: python\n\n with ForRange(x, 0, 10, 1):\n ...\n\n Here, x could theoretically be either of type TIME or NORMAL because int literals can have either type.\n However, we want this code to compile to with integer semantics which is why we need this visitor to run\n after job construction. 
(see QiJob __exit__ method).\n \"\"\"\n\n def visit_for_range(self, for_range_cm):\n from .qi_var_definitions import QiType\n\n if for_range_cm.var.type == QiType.UNKNOWN:\n for_range_cm.var._type_info.set_type(QiType.NORMAL, _TypeFallback.INT)\n\n super().visit_for_range(for_range_cm)\n\n def visit_constant(self, const):\n from .qi_var_definitions import QiType\n\n if const.type == QiType.UNKNOWN:\n if isinstance(const._given_value, float):\n const._type_info.set_type(QiType.TIME, _TypeFallback.FLOAT)\n else:\n assert isinstance(const._given_value, int)\n const._type_info.set_type(QiType.NORMAL, _TypeFallback.INT)" }, { "identifier": "_TypeDefiningUse", "path": "src/qiclib/code/qi_types.py", "snippet": "class _TypeDefiningUse(_TypeFact, Enum):\n VARIABLE_DEFINITION = 0\n VALUE_DEFINITION = 1\n SHIFT_EXPRESSION = 2\n PULSE_LENGTH = 3\n RECORDING_SAVE_TO = 4\n WAIT_COMMAND = 5\n RECORDING_OFFSET_EXPRESSION = 6\n PULSE_FREQUENCY = 7\n\n def to_error_message(self) -> str:\n return {\n _TypeDefiningUse.VARIABLE_DEFINITION: \"has been defined by the user as this type\",\n _TypeDefiningUse.VALUE_DEFINITION: \"has been defined by the user as this type\",\n _TypeDefiningUse.SHIFT_EXPRESSION: \"is used as right hand side of shift expression\",\n _TypeDefiningUse.PULSE_LENGTH: \"is used as length of pulse\",\n _TypeDefiningUse.RECORDING_SAVE_TO: \"is used as save_to of recording command\",\n _TypeDefiningUse.WAIT_COMMAND: \"is used as length in wait command\",\n _TypeDefiningUse.RECORDING_OFFSET_EXPRESSION: \"is used as an recording offset\",\n _TypeDefiningUse.PULSE_FREQUENCY: \"is used as pulse frequency.\",\n }[self]" } ]
import os import json import functools import warnings import numpy as np import qiclib.packages.utility as util from abc import abstractmethod from typing import Dict, List, Callable, Optional, Union, Set, Any, Type from ..hardware.taskrunner import TaskRunner from ..experiment.qicode.data_provider import DataProvider from ..experiment.qicode.data_handler import DataHandler from .qi_seq_instructions import SequencerInstruction from .qi_var_definitions import ( _QiVariableBase, _QiCalcBase, _QiConstValue, QiCellProperty, QiExpression, QiVariableSet, QiCondition, ) from .qi_pulse import QiPulse from .qi_visitor import ( QiCMContainedCellVisitor, QiResultCollector, QiVarInForRange, ) from .qi_prog_builder import QiProgramBuilder from .qi_types import ( QiType, QiPostTypecheckVisitor, QiTypeFallbackVisitor, _TypeDefiningUse, ) from .qi_types import _TypeDefiningUse from .qi_types import _TypeDefiningUse from .qi_types import ( _TypeConstraintReasonQiCommand, _IllegalTypeReason, _add_equal_constraints, ) from .qi_types import ( _TypeConstraintReasonQiCommand, _IllegalTypeReason, _add_equal_constraints, ) from .analysis.qi_insert_mem_parameters import ( insert_recording_offset_store_commands, insert_manipulation_pulse_frequency_store_commands, insert_readout_pulse_frequency_store_commands, ) from .qi_simulate import Simulator from ..experiment.qicode.base import QiCodeExperiment from qiclib.experiment.qicode.base import _TaskrunnerSettings from .qi_visitor import QiStringifyJob
16,249
------- .. code-block:: python qic: QiController = ... sample: QiSample = ... with QiJob() as job: q = QiCells(1) Readout(q[0], save_to="result") job.run(qic, sample, averages=1000) data = job.cells[0].data("result") :param name: The name of the variable, by default None """ def __init__(self, name: Optional[str] = None) -> None: self._cell = None self.data = None self.recording_count = 0 self.name: str = "" if name is None else name def get(self) -> np.ndarray: """gets the data of the result as a numpy array :return: The data of the experiment """ return np.array(self.data) def __str__(self) -> str: return f'QiResult("{self.name}")' class QiCommand: """Base class of every Job command. Provides _relevant_cells, containing every cell used for the execution of the command. Provides _associated_variable_set, containing every variable needed for the execution of the command. """ def __init__(self) -> None: self._associated_variable_set = QiVariableSet() self._relevant_cells: Set[QiCell] = set() @abstractmethod def accept(self, visitor, *input): raise RuntimeError( f"{self.__class__} doesn't implement `accept`. This is a bug." ) def is_variable_relevant(self, variable: _QiVariableBase) -> bool: return variable in self._associated_variable_set def add_associated_variable(self, x): if isinstance(x, _QiVariableBase): self._associated_variable_set.add(x) def __str__(self) -> str: return "cQiCommand" def _stringify(self) -> str: raise NotImplementedError(f"_stringify not implemented for {repr(self)}") _QiJobReference = None def _add_cmd_to_job(cmd: QiCommand): if _QiJobReference is None: raise RuntimeError("Can not use command outside QiJob context manager.") _QiJobReference._add_command(cmd) def _set_job_reference(job): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = job def _delete_job_reference(): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = None class QiCell: """A QiCell is an abstract representation of the qubit/cell the program is run on. Usually, a single :python:`QiCell` is not instantiated, but instead a :class:`QiCells` object. For a single :python:`QiCell`, use instead :python:`QiCells(1)` A :python:`QiCell` must be instantiated inside within a :class:`QiJob` context. The :python:`QiCell` object can be used to get properties that are defined on :class:`QiSamples <QiSample>`. For this, index the :python:`QiCell` object using the name of the property: .. code-block:: python q: QiCell = ... t1_time = q["t1"] The actual value for the accessed property (in the example above, the T1 time) is filled in when executing a :class:`QiJob` and providing the actual sample. **Tasks of the QiCell**: - Saves the pulses needed for program execution. - Provides a dictionary functionality to define commonly used durations/properties. - Implements a Sequencer object, which contains the assembler program after compilation. :param cellID: A unique ID :raises RuntimeError: When the :python:`QiCell` is instantiated outside a `QiJob` """ def __init__(self, cellID: int): if not isinstance(_QiJobReference, QiJob): raise RuntimeError("QiCell can't be used outside of QiJob.") self.cellID = cellID
# Copyright © 2017-2023 Quantum Interface ([email protected]) # Richard Gebauer, IPE, Karlsruhe Institute of Technology # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ This is the main module of QiCode. Here, all important commands write QiPrograms are defined. """ class QiResult: """Result of an experiment. Can be accessed via :python:`job.cells[cell_index].data("result name")`. Where :python:`cells` denotes a :class:`QiCells` object and :python:`cell_index` an integer. The actual data can be retrieved as a numpy array using the :meth:`get` Method Example ------- .. code-block:: python qic: QiController = ... sample: QiSample = ... with QiJob() as job: q = QiCells(1) Readout(q[0], save_to="result") job.run(qic, sample, averages=1000) data = job.cells[0].data("result") :param name: The name of the variable, by default None """ def __init__(self, name: Optional[str] = None) -> None: self._cell = None self.data = None self.recording_count = 0 self.name: str = "" if name is None else name def get(self) -> np.ndarray: """gets the data of the result as a numpy array :return: The data of the experiment """ return np.array(self.data) def __str__(self) -> str: return f'QiResult("{self.name}")' class QiCommand: """Base class of every Job command. Provides _relevant_cells, containing every cell used for the execution of the command. Provides _associated_variable_set, containing every variable needed for the execution of the command. """ def __init__(self) -> None: self._associated_variable_set = QiVariableSet() self._relevant_cells: Set[QiCell] = set() @abstractmethod def accept(self, visitor, *input): raise RuntimeError( f"{self.__class__} doesn't implement `accept`. This is a bug." ) def is_variable_relevant(self, variable: _QiVariableBase) -> bool: return variable in self._associated_variable_set def add_associated_variable(self, x): if isinstance(x, _QiVariableBase): self._associated_variable_set.add(x) def __str__(self) -> str: return "cQiCommand" def _stringify(self) -> str: raise NotImplementedError(f"_stringify not implemented for {repr(self)}") _QiJobReference = None def _add_cmd_to_job(cmd: QiCommand): if _QiJobReference is None: raise RuntimeError("Can not use command outside QiJob context manager.") _QiJobReference._add_command(cmd) def _set_job_reference(job): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = job def _delete_job_reference(): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = None class QiCell: """A QiCell is an abstract representation of the qubit/cell the program is run on. Usually, a single :python:`QiCell` is not instantiated, but instead a :class:`QiCells` object. For a single :python:`QiCell`, use instead :python:`QiCells(1)` A :python:`QiCell` must be instantiated inside within a :class:`QiJob` context. 
The :python:`QiCell` object can be used to get properties that are defined on :class:`QiSamples <QiSample>`. For this, index the :python:`QiCell` object using the name of the property: .. code-block:: python q: QiCell = ... t1_time = q["t1"] The actual value for the accessed property (in the example above, the T1 time) is filled in when executing a :class:`QiJob` and providing the actual sample. **Tasks of the QiCell**: - Saves the pulses needed for program execution. - Provides a dictionary functionality to define commonly used durations/properties. - Implements a Sequencer object, which contains the assembler program after compilation. :param cellID: A unique ID :raises RuntimeError: When the :python:`QiCell` is instantiated outside a `QiJob` """ def __init__(self, cellID: int): if not isinstance(_QiJobReference, QiJob): raise RuntimeError("QiCell can't be used outside of QiJob.") self.cellID = cellID
self.manipulation_pulses: List[QiPulse] = []
11
2023-11-10 10:26:10+00:00
24k
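The record that ends here pairs a list of context entries (each a dict with "identifier", "path" and "snippet" keys) with the source of the target file, the ground-truth next line (here `self.manipulation_pulses: List[QiPulse] = []`), and an integer index (11) marking which context entry that line depends on. A minimal sketch of how such a record could be validated after loading is given below; the top-level key names used (context, next_line, gold_snippet_index) are assumptions made for illustration only, not a documented loader API.

# Minimal validation sketch for one record of this dump, assuming it has been
# parsed into a Python dict. The top-level key names below are hypothetical.
from typing import Any, Dict, List

def gold_snippet(record: Dict[str, Any]) -> Dict[str, str]:
    """Return the context entry that the ground-truth next line relies on."""
    context: List[Dict[str, str]] = record["context"]
    return context[record["gold_snippet_index"]]

def gold_identifier_used(record: Dict[str, Any]) -> bool:
    """Loose check: the gold entry's identifier should appear in the next line."""
    return gold_snippet(record)["identifier"] in record["next_line"]

# Tiny stand-in record mirroring the fields visible in the example above.
example = {
    "context": [{"identifier": "QiPulse",
                 "path": "src/qiclib/code/qi_pulse.py",
                 "snippet": "class QiPulse: ..."}],
    "next_line": "self.manipulation_pulses: List[QiPulse] = []",
    "gold_snippet_index": 0,
}
assert gold_identifier_used(example)

For the real record above, the same check would resolve index 11 against the full context list and look for that entry's identifier in the stored next line; a failure would point to a mislabeled gold index rather than a problem with the snippet text itself.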
fg320/DEASC
examples/12C_5x1_farm_dyn_tuning_wso_grouping_looping.py
[ { "identifier": "WfModel", "path": "deasc/wf_model.py", "snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by pointing towards an input file.\n (FLORIS interface object).\n\n Args\n ----\n input file:(FLORIS .json input file).\n \"\"\"\n # Read and initialize input file\n self.input_file = input_file\n self.interface = floris_input_handler(self.input_file, path)\n\n # Assign wind farm model proporties\n self.D, self.H_hub, self.n_turbs = floris_properties(self)\n\n def set_aligned_layout(self, n_row, n_col, spac_x, spac_y, coordinates=False):\n \"\"\"\n Modify farm layout in aligned wind turbines with constant spacing,\n differing only from rows to columns. Flow field is also reinitialized.\n\n Args\n ----\n n_row: (float) number of turbine rows\n n_col: (float) number of turbine columns\n spac_x: (float) WT diam normalized turbines distance in x direction\n spac_y: (float) WT diam normalized turbines distance in y direction\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Input type check\n if not all(isinstance(i, int) for i in [n_row, n_col]) or \\\n not all(isinstance(j, (int, float)) for j in [spac_x, spac_y]):\n err_msg = \"Incorrect input value types\"\n raise ValueError(err_msg)\n\n # Calculate new coordinate farm layout\n layout_x = []\n layout_y = []\n for i in range(int(n_row)):\n for j in range(int(n_col)):\n layout_x.append(i * spac_x * self.D)\n layout_y.append(j * spac_y * self.D)\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def set_HR_layout(self, coordinates=False):\n \"\"\"\n Set Horns Rev wind farm layout to wind farm object and\n returns turbines' x and y coordinates if coordinates=True.\n\n Args\n ----\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Vestas V80 2 MW diameter check\n if self.D != 80:\n warning = \"Rotor diameter not from the Vestas V80 2 MW turbine\"\n warnings.warn(warning, UserWarning)\n\n n_rows = 10\n n_cols = 8\n spac_x = 7\n spac_y = 7\n angle = 6\n layout_x = []\n layout_y = []\n for i in range(int(n_rows)):\n for j in range(int(n_cols)):\n layout_x.append((i * spac_x * self.D) -\n (np.sin(np.radians(angle)) * j * spac_y * self.D))\n layout_y.append(j * spac_y * self.D * np.cos(np.radians(angle)))\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def farm_eval(self, yaw=None, ws=None, wd=None, ti=None, shear=None):\n \"\"\"\n Calculate farm flow field for given wind farm layout and input conditions.\n Return main outputs, such as yaw angles, turbines power, farm power, etc.\n\n Args\n ----\n yaw: (list, optional) turbines yaw angles (deg). Default to None.\n ws: (float, optional) input wind speeds (m/s). 
Default to None.\n wd: (float, optional) input wind directions (deg). Default to None.\n ti: (float, optional) input turbulence intensity. Default to None.\n shear: (float, optional) shear exponent. Default to None.\n\n Returns\n -------\n wf_pow: (float) WF power (MWatts).\n wt_pow: (np.array) WTs power (MWatts).\n wt_ti: (list) WTs turbulence intensity.\n wt_yaw: (np.array) WTs yaw angles (deg).\n \"\"\"\n # Main wind farm calculation\n wf_pow, wt_pow, wt_ti, wt_yaw, _ = floris_farm_eval(self,\n yaw,\n ws,\n wd,\n ti,\n shear)\n\n return (wf_pow, wt_pow, wt_ti, wt_yaw)\n\n def pow_yaw_sweep_1var(self, layout, var_info):\n \"\"\"\n Return wind farm power for a single yaw variable, either a\n single turbine or a single row of turbines. Sweep by row not possible\n for not aligned \"custom\" layouts.\n\n Args\n ----\n layout: (tuple)\n row: (integer) number of farm rows\n cols: (integer) number of farm columns\n or string \"custom\"\n var_info: (tuple)\n var_type: (string) \"T\" for turbine,\n \"R\" for row (not for custom layouts)\n var: (integer) turbine or row number\n var_value: (list of floats) variable values\n\n Returns\n -------\n obj_out: tuple\n obj: (list) objective values\n obj_func: (string) objective function\n var_info: (tuple) see input\n model: (string) model name\n \"\"\"\n # Extract inputs and check inputs\n var_type, var, var_value = var_info\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'R' and layout == \"custom\":\n err_msg = \"Row not allowed for custom layouts\"\n raise ValueError(err_msg)\n if var_type == 'R' and var > rows:\n err_msg = \"Row specified not in farm\"\n raise ValueError(err_msg)\n if var_type == 'T' and var > self.n_turbs:\n err_msg = \"Turbine specified not in farm\"\n raise ValueError(err_msg)\n\n # Calculations\n yaw_angles = np.array(floris_current_yaw(self))\n wf_pow = []\n\n for yaw_change in var_value:\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'T':\n yaw_angles[(var-1)] = yaw_change\n elif var_type == 'R':\n idx_1 = var*cols\n idx_0 = idx_1-cols\n yaw_angles[idx_0:idx_1] = yaw_change\n else:\n err_msg = \"var_type either 'T' or 'R'\"\n raise ValueError(err_msg)\n\n wf_pow_single, _, _, _ = self.farm_eval(yaw=yaw_angles)\n wf_pow.append(wf_pow_single)\n\n obj_out = (wf_pow, 'Farm Power')\n var_info = (var_type, var, var_value)\n print(\"Function exploration complete\")\n\n return obj_out, var_info" }, { "identifier": "WSOpt", "path": "deasc/wake_steering.py", "snippet": "class WSOpt:\n \"\"\"\n Class to perform wake steering optimization with a WfModel object, given an a-priori\n specified wind farm layout and specified atmopheric conditions. Optimization can have\n all/some turbines as variables, or rows for wind farms with equal columns. 
Optimizers\n available are the local SLSQP, where linear constraints can be added, and the global\n optimizer TuRBO.\n \"\"\"\n\n def __init__(self,\n wf_model,\n inflow,\n variables,\n var_bounds,\n var_initial,\n opt_method=\"SLSQP\",\n opt_options=None,\n obj_function=\"Farm Power\",\n constraints=(None, None, None),\n by_row=(False, None, None),\n tuning_dynamic=False\n ):\n \"\"\"\n Args\n ----\n wf_model: (WfModel)\n WfModel to perform wake steering optimization.\n inflow: (list) Inflow conditions for wake steering optimization.\n yaw_initial: (list) wind farm yaw angles (deg).\n (string) 'random' for random intial wind farm yaw angles.\n wd: (float) input wind directions (deg).\n ws: (float) input wind speeds (m/s).\n ti: (float) input turbulence intensity.\n shear: (float) shear exponent.\n variables: (list)\n List of turbines (or rows) to optimize. Naming convention starts from 1.\n var_bounds: (tuple)\n low_bound: (float) variable (yaw angle) lower bound.\n upp_bound: (float) variable (yaw angle) upper bound.\n var_initial:\n SLSQP: (list) list of initial variable values for each variable.\n (string) 'random' for random initial variable values.\n TURBO_1: (list of lists) list of n_init variable values lists\n (see TURBO_1 options).\n (string) 'LHS' latin hypercube sampling.\n TURBO_M: (string) 'LHS' latin hypercube sampling.\n opt_method: (string, optional) optimization method.\n 'SLSQP', 'TURBO_1 and 'TURBO_M' available.\n Default set to 'SLSQP'.\n opt_options: (dict , optional) optimization method options dictionary.\n Default set to None.\n opt_function: (string , optional) objective function. 'Farm Power' available\n Default set to 'Farm Power'.\n constraints: (tuple) Linear constraints definition. Limited to SLSQP.\n A: (matrix) linear constraint matrix.\n Default set to None.\n low_bound_constr: (float) lower non-normalized contraint bound.\n Default set to None.\n upp_bnd_constr: (float) upper non-normalized contraint bound.\n Default set to None.\n by_row : (tuple, optional) Optimization by row, requires all farm columns to have\n the same amount of rows.\n by_row_bool: (bool) True if optimization variables are wind farm rows,\n False if wind farm turbines. Default set to False.\n rows:: (int) wind farm rows. Default set to None.\n cols:: (int) wind farm columns. Default set to None.\n tuning_dynamic : (bool, optional)\n If True, include dynamic parameter tuning. See tuning_dynamic_initialize\n method. 
Default to False.\n \"\"\"\n # Opt Methods - Opt Options - Optimizers - Opt Functions\n self.opt_method_list = [\"SLSQP\", \"TURBO_1\", \"TURBO_M\"]\n self.opt_options_dict = {\"SLSQP\": {'maxiter': 100,\n 'disp': True,\n 'iprint': 2,\n 'ftol': 1e-6,\n 'eps': 0.01},\n \"TURBO_1\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"},\n \"TURBO_M\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"n_trust_regions\": 2,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"}}\n self.optimizer_dict = {'SLSQP': self._optimizer_scipy,\n 'TURBO_1': self._optimizer_turbo_1,\n 'TURBO_M': self._optimizer_turbo_m}\n self.obj_function_dict = {'Farm Power': self._obj_function_power}\n\n # Optimization methods and optimizer\n self.opt_method = opt_method\n self._opt_method_settler()\n self.optimizer = self.optimizer_dict[self.opt_method]\n\n # Optimizer options\n self.opt_options = opt_options\n self._opt_options_settler()\n\n # Optimization function\n self.obj_function_name = obj_function\n self._obj_function_settler()\n\n # Wind farm conditions\n self.wf_model = wf_model\n self.wf_model_dict_original = floris_extract_object_dict(self.wf_model)\n self.yaw_initial, self.wd, self.ws, self.ti, self.shear = inflow\n if not isinstance(self.yaw_initial, (list, np.ndarray)):\n if self.yaw_initial == 'random':\n self.yaw_initial = self._random_yaw_generator(self.wf_model.n_turbs,\n var_bounds)\n self._yaw_initial_input_handler()\n self.yaw_initial = np.array([float(item) for item in self.yaw_initial])\n\n # Optimization per wind turbine or per wind farm row\n self.by_row_bool = by_row[0]\n if self.by_row_bool:\n self.rows = by_row[1]\n self.cols = by_row[2]\n self._by_row_input_handler()\n\n # Variable bounds\n self.var_bounds = var_bounds\n self.low_bound, self.upp_bound = self.var_bounds\n self.low_bound_norm = norm(self.low_bound, self.low_bound, self.upp_bound)\n self.upp_bound_norm = norm(self.upp_bound, self.low_bound, self.upp_bound)\n self.var_bounds_norm = (self.low_bound_norm, self.upp_bound_norm)\n tmp = [self.var_bounds_norm for i in range(len(variables))]\n self.var_bounds_norm_list = tmp\n tmp = np.array([self.low_bound_norm for i in range(len(variables))])\n self.low_bound_norm_list = tmp\n tmp = np.array([self.upp_bound_norm for i in range(len(variables))])\n self.upp_bound_norm_list = tmp\n\n # Constraints\n self.A = constraints[0]\n self.low_bound_constr = constraints[1]\n self.upp_bound_constr = constraints[2]\n if self.A is not None:\n self._constraints_input_handler()\n self.low_bound_constr_norm = norm(self.low_bound_constr,\n self.low_bound,\n self.upp_bound)\n self.upp_bound_constr_norm = norm(self.upp_bound_constr,\n self.low_bound,\n self.upp_bound)\n\n # Yaw variables\n self.variables = variables\n self.var_initial = var_initial\n self._variables_input_handler()\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.opt_method == 'SLSQP' and self.var_initial == 'random':\n self.var_initial = self._random_yaw_generator(len(self.variables),\n self.var_bounds)\n self._var_initial_input_handler()\n self.var_initial_norm = self._var_initial_norm()\n\n # Dynamic tuning\n self.tuning_dyn_bool = tuning_dynamic\n 
self._tuning_dyn_bool_check()\n self.tuning_dyn_initialization = False\n\n self.opt_run = False\n\n def tuning_dyn_initialize(self, tuning_dyn_obj_list):\n \"\"\"\n Assign list of tuning dynamic objects TuningDyn to the WSOpt object.\n\n Args\n ----\n tuning_dyn_object: (list of TuningDyn objects)\n \"\"\"\n self.tuning_dyn_obj_list = tuning_dyn_obj_list\n self._tuning_dyn_init_input_handler()\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n tuning_dyn_obj.wso_compatibility_check(self)\n self.tuning_dyn_initialization = True\n\n def optimize_yaw(self):\n \"\"\"\n Optimize the yaw angle for the given WSOpt object.\n\n Returns\n -------\n opt_yaw_angles_vars: (ndarray) optimal yaw angles for the optimization variables.\n opt_yaw_angles_all: (ndarray) optimal yaw angles for all.wind farm turbines.\n \"\"\"\n # Tuning dynamic initialization check\n self._tuning_dyn_initialization_check()\n\n # Print optimization info\n self._print_info()\n\n # Wind farm power - no yaw\n self.wf_pow_noyaw = self._get_farm_power_noyaw()\n\n # Optimize\n self._iter_details_setup()\n self.opt_yaw_angles_vars, self.opt_yaw_angles_all = self.optimizer()\n self.opt_run = True\n\n return (self.opt_yaw_angles_vars, self.opt_yaw_angles_all)\n\n def get_optimization_details(self):\n \"\"\"\n Return optimization details: optimizer iterations details and objective function\n evaluations details. The two are identical for TURBO optimizers as an objective\n function evaluation corresponds to an optimizer iteration, different for SLSQP as\n additional objective function evaluations are required to approximate gradients.\n\n Returns\n -------\n iter_details: (tuple) optimizer iterations details.\n iter_yaw_angles: (list) list of yaw angles per optimizer iteration.\n iter_obj_func: (list) list of objective function per optimizer iteration.\n iter_farm_power: (list) list of farm power values per optimizer iteration.\n eval_details: (tuple) objective fucntion evaluations details.\n eval_yaw_angles: (list) list of yaw angles per evaluation.\n eval_obj_func: (list) list of objective function per evaluation.\n eval_farm_power: (list) list of farm power values per evaluation.\n \"\"\"\n iter_details = (self.iter_yaw_angles,\n self.iter_obj_func,\n self.iter_farm_power)\n eval_details = (self.eval_yaw_angles,\n self.eval_obj_func,\n self.eval_farm_power)\n return (iter_details, eval_details)\n\n # %% Private methods\n\n def _opt_method_settler(self):\n if self.opt_method not in self.opt_method_list:\n err_msg = \"Optimization method not recognized\"\n raise Exception(err_msg)\n\n def _opt_options_settler(self):\n if self.opt_options is None:\n self.opt_options = self.opt_options_dict[self.opt_method]\n\n def _obj_function_settler(self):\n if self.obj_function_name in list(self.obj_function_dict.keys()):\n self.obj_function = self.obj_function_dict[self.obj_function_name]\n else:\n err_msg = \"Optimization function not recognized\"\n raise Exception(err_msg)\n\n def _random_yaw_generator(self, yaw_number, yaw_bounds):\n yaw_angles = []\n for i in range(yaw_number):\n x = random.choice(range(yaw_bounds[0], yaw_bounds[1]+1))\n yaw_angles.append(x)\n return yaw_angles\n\n def _yaw_initial_input_handler(self):\n if len(self.yaw_initial) != self.wf_model.n_turbs:\n err_msg = \"Initial yaw angles do not match turbine number\"\n raise Exception(err_msg)\n\n def _by_row_input_handler(self):\n if self.rows*self.cols != self.wf_model.n_turbs:\n err_msg = \"Farm rows and columns provided do not match turbine number\"\n raise 
Exception(err_msg)\n\n def _constraints_input_handler(self):\n if self.opt_method != 'SLSQP':\n err_msg = \"Linear constraints (on top of bounds) limited to SLSQP optimizer\"\n raise Exception(err_msg)\n\n def _variables_input_handler(self):\n if self.by_row_bool:\n for row in self.variables:\n if row > self.rows:\n err_msg = \"Row/s specified not in farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.rows:\n err_msg = \"Too many rows specified\"\n raise Exception(err_msg)\n else:\n for turb in self.variables:\n if turb > self.wf_model.n_turbs:\n err_msg = \"Turbine/s specified not in the farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.wf_model.n_turbs:\n err_msg = \"Too many turbines specified\"\n raise Exception(err_msg)\n if 0 in self.variables:\n err_msg = \"Turbine/row counting convention starts from 1\"\n raise Exception(err_msg)\n\n def _var_initial_input_handler(self):\n if self.opt_method == 'TURBO_1':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n pass\n elif self.var_initial == 'random':\n err_msg = \"Random initial variables limited to SLSQP optimizer\"\n raise Exception(err_msg)\n else:\n if len(self.var_initial) != self.opt_options[\"n_init\"]:\n err_msg = \"n_init initial variable lists are needed (see TURBO options)\"\n raise Exception(err_msg)\n elif len(self.var_initial[0]) != len(self.variables):\n err_msg = \"var_initial sublists length not equal number of variables\"\n raise Exception(err_msg)\n elif self.opt_method == 'TURBO_M':\n if self.var_initial != 'LHS':\n err_msg = \"TURBO_M optimizer requires LHS as initial sampling\"\n elif self.opt_method == 'SLSQP':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n err_msg = \"Latin Hypercube Sampling limited to TURBO optimizers\"\n raise Exception(err_msg)\n elif len(self.variables) != len(self.var_initial):\n err_msg = \"var_initial length needs to equal number of variables\"\n raise Exception(err_msg)\n\n def _var_initial_norm(self):\n if self.opt_method == \"SLSQP\":\n self.var_initial = np.array([float(item) for item in self.var_initial])\n var_initial_norm = norm(self.var_initial, self.low_bound, self.upp_bound)\n elif self.var_initial == 'LHS':\n var_initial_norm = None\n else:\n self.var_initial = np.array([np.array(x) for x in self.var_initial])\n var_initial_norm = []\n for x_list in self.var_initial:\n x_list_norm = []\n for x in x_list:\n x_norm = norm(x, self.low_bound, self.upp_bound)\n x_list_norm.append(x_norm)\n var_initial_norm.append(np.array(x_list_norm))\n return np.array(var_initial_norm)\n\n def _get_farm_power_noyaw(self):\n if (self.tuning_dyn_initialization and\n hasattr(self.tuning_dyn_obj_list[0], 'wf_pow_noyaw')):\n wf_pow_noyaw = self.tuning_dyn_obj_list[0].wf_pow_noyaw\n else:\n self.yaw_zero = np.full(shape=self.wf_model.n_turbs, fill_value=0.0)\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n # Tune parameters\n if self.tuning_dyn_initialization:\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, self.yaw_zero)\n\n wf_pow_noyaw = floris_calculate_farm_power(self.wf_model, self.yaw_zero)\n return wf_pow_noyaw\n\n def _print_info(self):\n print(\"=====================================================\")\n print(\"Optimizing wake redirection control...\")\n print(\"Optimization method: %s\" % (self.opt_method))\n print(\"Optimization function: %s \\n\" % 
(self.obj_function_name))\n if self.by_row_bool:\n print(\"Rows being optimized: \")\n print(self.variables)\n else:\n print(\"Turbines being optimized: \")\n print(self.variables)\n print(\"Number of variables to optimize = \", len(self.variables))\n print(\"=====================================================\")\n\n def _iter_details_setup(self):\n # Details for each obj function evaluation\n self.eval_yaw_angles = [] # deg\n self.eval_obj_func = []\n self.eval_farm_power = [] # MW\n\n # Details for each optimizer iteration\n self.iter_yaw_angles = [] # deg\n self.iter_obj_func = []\n self.iter_farm_power = [] # MW\n\n def _variables_to_farm_yaw(self, yaw_initial, var_values):\n yaw_angles = copy.deepcopy(yaw_initial)\n if self.by_row_bool:\n for i, row_idx in enumerate(self.variables):\n idx_1 = row_idx*self.cols\n idx_0 = idx_1-self.cols\n yaw_angles[idx_0:idx_1] = var_values[i]\n else:\n for i, turb_idx in enumerate(self.variables):\n yaw_angles[turb_idx-1] = var_values[i]\n return yaw_angles.tolist()\n\n # %% Optimizers\n\n def _optimizer_scipy(self):\n # Call back function for iter details\n def callback_func(xk):\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n # Linearly constrained case\n if self.A is not None:\n self.C = LinearConstraint(self.A,\n self.low_bound_constr_norm,\n self.upp_bound_constr_norm)\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n constraints=(self.C,),\n options=self.opt_options)\n # Unconstrained case\n else:\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n options=self.opt_options)\n # Extract optimal yaw angles for variables\n opt_yaw_angles_vars = unnorm(self.residual_plant.x,\n self.low_bound,\n self.upp_bound)\n # Extract optimal yaw angles for the entire farm\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Use best index because if total iterations reached, optimum not last evaluation\n eval_yaw_angles_lists = [x.tolist() for x in self.eval_yaw_angles]\n index_best = eval_yaw_angles_lists.index(opt_yaw_angles_all)\n opt_yaw_angles_all = np.array(opt_yaw_angles_all)\n self.obj_func_opt = self.eval_obj_func[index_best]\n self.farm_power_opt = self.eval_farm_power[index_best]\n\n # Add initial and last points to iteration details\n self.iter_yaw_angles.insert(0, self.eval_yaw_angles[0])\n self.iter_obj_func.insert(0, self.eval_obj_func[0])\n self.iter_farm_power.insert(0, self.eval_farm_power[0])\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_1(self):\n\n # TURBO initial sampling\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n X_init_provided = False\n X_init_same_norm = None\n else:\n X_init_provided = True\n X_init_same_norm = self.var_initial_norm\n\n # TURBO 
optimization\n turbo_1 = Turbo1(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n X_init_provided=X_init_provided,\n X_init_same=X_init_same_norm,\n )\n turbo_1.optimize()\n X = turbo_1.X # Evaluated points\n fX = turbo_1.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.obj_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_m(self):\n\n # TURBO optimization\n turbo_m = TurboM(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n )\n turbo_m.optimize()\n X = turbo_m.X # Evaluated points\n fX = turbo_m.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.cost_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n # %% Objective functions\n\n def _obj_function_power(self, var_norm):\n\n # Extract farm yaw angles\n var_unnorm = unnorm(var_norm, self.low_bound, self.upp_bound)\n yaw_angles = self._variables_to_farm_yaw(self.yaw_initial, var_unnorm)\n yaw_angles = np.array([float(item) for item in yaw_angles])\n\n # Tune parameters dynamically\n if self.tuning_dyn_initialization:\n # Set equal yaw angles in groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n yaw_angles = self.tuning_dyn_obj_list[0].set_yaw_groups(yaw_angles)\n # Tune parameters\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, yaw_angles)\n\n # Calculate negative of the farm power normalized by power for zero yaw\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n wf_pow = floris_calculate_farm_power(self.wf_model, yaw_angles)\n obj_function = (-1 * wf_pow / self.wf_pow_noyaw)\n\n # Update evalauation details\n 
self.eval_yaw_angles.append(yaw_angles)\n self.eval_obj_func.append(obj_function)\n self.eval_farm_power.append(wf_pow)\n\n return obj_function\n\n # %% Tuning Dynamic methods\n\n def _tuning_dyn_bool_check(self):\n if self.tuning_dyn_bool and self.by_row_bool:\n err_msg = \"Dynamic tuning not available for optimization by row.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_init_input_handler(self):\n if isinstance(self.tuning_dyn_obj_list, (list, np.ndarray)) is False:\n err_msg = \"TuningDyn objects need to be in a list, even if only one.\"\n raise Exception(err_msg)\n # Check dynamic grouping tuning objects have the same tuning groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n tuning_groups_first = self.tuning_dyn_obj_list[0].tuning_groups\n same_groups = all(obj.tuning_groups == tuning_groups_first\n for obj in self.tuning_dyn_obj_list)\n if same_groups is False:\n err_msg = \"TuningDyn objects have different groupings.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_initialization_check(self):\n if self.tuning_dyn_bool and self.tuning_dyn_initialization is False:\n err_msg = \"Tuning dynamic not initialized. See tuning_dyn_initialize method.\"\n raise Exception(err_msg)" }, { "identifier": "Tuning", "path": "deasc/tuning.py", "snippet": "class Tuning:\n \"\"\"\n Parameter tuning class for a low-fidelity model, where one or more\n parameters are tuned to higher fidelity power measurements. In particular,\n the RMSE is minimised for single turbine power measurements for a single or\n the sum of multiple atmospheric conditions. The wind farm layout is assumed fixed.\n \"\"\"\n\n def __init__(self,\n wf_model,\n variables_class_list,\n variables_names_list,\n variables_bounds_list,\n obj_func_name='RMSE',\n opt_method='SLSQP',\n opt_options=None\n ):\n \"\"\"\n Args\n ----\n wf_model : WfModel object (low-fidelity model)\n single WfModel object to tune\n variables_class_list: list of strings\n list of classes of parameters to tune, one per parameter\n variables_names_list : list of strings\n list of parameter names to tune\n variables_bounds_list : list of tuples\n list of parameter bounds, upper and lower limits for each parameter\n obj_func_name: string\n objective function. Default set to \"RMSE\"\n opt_method: string\n optimization method. Dafault set to \"SLSQP\" (\"TURBO_1\" also available)\n opt_options: dict\n optimizer options. 
Default set to None\n \"\"\"\n self.obj_func_dict = {'RMSE': self._tuning_rmse_function}\n self.opt_method_list = [\"SLSQP\", \"TURBO_1\"]\n self.opt_options_dict = {\"SLSQP\": {'maxiter': 100,\n 'disp': True,\n 'iprint': 2,\n 'ftol': 1e-12,\n 'eps': 0.1},\n \"TURBO_1\": {\"n_init\": 2*len(variables_names_list),\n \"max_evals\": 100,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"}}\n self.tuning_optimizer_dict = {'SLSQP': self._tuning_optimizer_scipy,\n 'TURBO_1': self._tuning_optimizer_turbo_1}\n\n self.wf_model = wf_model\n self.variables_class_list = variables_class_list\n self.variables_names_list = variables_names_list\n self.variables_bounds_list = variables_bounds_list\n\n self.obj_func_name = obj_func_name\n self.obj_func = self.obj_func_dict[self.obj_func_name]\n self.opt_method = opt_method\n if opt_options == None:\n self.opt_options = self.opt_options_dict[self.opt_method]\n else:\n self.opt_options = opt_options\n self._tuning_optimizer = self.tuning_optimizer_dict[self.opt_method]\n\n self.tuning_data_received = False\n self.tuning_conditions_received = False\n\n print(\"\\nInitialised parameter tuning\")\n print(\"%i parameters to tune\" % (len(self.variables_names_list)))\n print(\"%s optimization method\" % (self.opt_method))\n\n def tuning_data(self, data_power_list):\n \"\"\"\n Provide training higher-fidelity data for parameter tuning.\n Limited to power of each turbine for each condition ('RMSE')\n\n Args\n ----\n data_power_list : list of lists\n For each condition:\n list of turbines power output ('RMSE')\n \"\"\"\n self.tuning_data_power_list = data_power_list\n self.tuning_data_received = True\n pass\n\n def tuning_conditions(self,\n yaw_angles_list,\n wind_directions_list,\n wind_speeds_list,\n turbulence_intensities_list,\n wind_shear_list):\n \"\"\"\n Define the wind farm conditions (yaw and atmospheric)\n of the higher-fidelity data.\n\n Args\n ----\n yaw_angles_list : list of lists\n For each condition, list of turbines yaw_angles\n wind_directions_list: list\n For each condtion, wind direction\n wind_speeds_list: list\n For each condtion, wind speed\n turbulence_intensities_list: list\n For each condtion, wind direction\n wind_shear_list: list\n For each condtion, wind shear\n \"\"\"\n self.yaw_angles_list = yaw_angles_list\n self.wind_directions_list = wind_directions_list\n self.wind_speeds_list = wind_speeds_list\n self.turbulence_intensities_list = turbulence_intensities_list\n self.wind_shear_list = wind_shear_list\n self.tuning_conditions_received = True\n pass\n\n def tune_parameters(self):\n \"\"\"\n Tune specified parameters of a WfModel object.\n Requires higher-fidelity tuning data and the related conditions to be\n previously specified (refer to Tuning methods: tuning_data and tuning_conditions).\n\n Returns\n -------\n wf_model_tuned: WfModel object\n WfModel object with parameters tuned\n wf_model_dict_opt: dictionary\n tuned WfModel object dictionary\n \"\"\"\n # Double check tuning data and conditions have been specified\n if self.tuning_data_received is False:\n err_msg = \"Tuning data not specified. Use tuning_data method.\"\n raise Exception(err_msg)\n if self.tuning_conditions_received is False:\n err_msg = \"Tuning conditions not specified. 
Use tuning_conditions method.\"\n raise Exception(err_msg)\n\n # Extract original wf_model object dictionary and print its parameters\n self.wf_model_dict_original = floris_extract_object_dict(self.wf_model)\n self.models_dict = floris_extract_models_dict(self.wf_model_dict_original)\n floris_print_params(self.wf_model_dict_original,\n self.models_dict,\n \"Original model parameters\")\n\n # Extract initial variable values and normalise them\n self.variables_init = self._wf_model_dict_to_variables(self.wf_model_dict_original,\n self.variables_class_list,\n self.variables_names_list)\n self.variables_init_norm = self._norm_variables(self.variables_init,\n self.variables_bounds_list)\n\n # Normalize variable bounds\n tmp = self.variables_bounds_list\n (self.variables_bounds_list_norm,\n self.variables_low_bound_list_norm,\n self.variables_upp_bound_list_norm) = self._norm_variables_bounds_lists(tmp)\n\n # Minimisation of error | Extract optimal variables\n self._tuning_optimizer()\n self.opt_variables = self._unnorm_variables(self.opt_variables_norm,\n self.variables_bounds_list)\n\n # Apply tuned parameters (opt_variables) to wf_model and print them\n self.wf_model_dict_opt = self._vars_to_wf_model_dict(self.wf_model_dict_original,\n self.variables_class_list,\n self.variables_names_list,\n self.opt_variables)\n self.wf_model = floris_param_change_object(self.wf_model, self.wf_model_dict_opt)\n floris_print_params(self.wf_model_dict_opt,\n self.models_dict,\n \"Optimal model parameters\")\n\n return self.wf_model, self.wf_model_dict_opt\n\n # %% Private methods\n\n def _wf_model_dict_to_variables(self, wf_model_dict, class_list, names_list):\n variables = []\n for i in range(len(names_list)):\n variable = floris_extract_parameter(wf_model_dict,\n class_list[i],\n names_list[i])\n variables.append(variable)\n return variables\n\n def _norm_variables(self, variables, variables_bounds_list):\n variables_norm = ([norm(variables[i],\n variables_bounds_list[i][0],\n variables_bounds_list[i][1])\n for i in range(len(variables))])\n return variables_norm\n\n def _norm_variables_bounds_lists(self, variables_bounds_list):\n variables_bounds_list_norm = []\n variables_low_bound_list_norm = []\n variables_upp_bound_list_norm = []\n for i, variable_bounds in enumerate(variables_bounds_list):\n lower_bound_norm = norm(variable_bounds[0],\n variable_bounds[0],\n variable_bounds[1])\n upper_bound_norm = norm(variable_bounds[1],\n variable_bounds[0],\n variable_bounds[1])\n bound_norm_tuple = (lower_bound_norm, upper_bound_norm)\n variables_bounds_list_norm.append(bound_norm_tuple)\n variables_low_bound_list_norm.append(lower_bound_norm)\n variables_upp_bound_list_norm.append(upper_bound_norm)\n return (variables_bounds_list_norm,\n np.array(variables_low_bound_list_norm),\n np.array(variables_upp_bound_list_norm))\n\n def _unnorm_variables(self, variables_norm, variables_bounds_list):\n variables = ([unnorm(variables_norm[i],\n variables_bounds_list[i][0],\n variables_bounds_list[i][1])\n for i in range(len(variables_norm))])\n return variables\n\n def _vars_to_wf_model_dict(self,\n wf_model_dict_original,\n variables_class_list,\n variables_names_list,\n variables):\n wf_model_dict_new = copy.deepcopy(wf_model_dict_original)\n for i in range(len(variables)):\n wf_model_dict_new = floris_param_change_object_dict(wf_model_dict_new,\n variables_class_list[i],\n variables_names_list[i],\n variables[i])\n return wf_model_dict_new\n\n def _tuning_optimizer_scipy(self):\n self.opt_results = 
minimize(self.obj_func,\n self.variables_init_norm,\n method=self.opt_method,\n bounds=self.variables_bounds_list_norm,\n options=self.opt_options)\n self.opt_variables_norm = self.opt_results.x\n\n def _tuning_optimizer_turbo_1(self):\n turbo_1 = Turbo1(f=self.obj_func,\n lb=self.variables_low_bound_list_norm,\n ub=self.variables_upp_bound_list_norm,\n **self.opt_options,\n )\n turbo_1.optimize()\n X = turbo_1.X # Evaluated points\n fX = turbo_1.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n self.opt_variables_norm = x_best\n\n def _tuning_rmse_function(self, variables_norm):\n\n # Unnorm variables, create new wf_model dictionary\n variables = self._unnorm_variables(variables_norm, self.variables_bounds_list)\n wf_model_dict_new = self._vars_to_wf_model_dict(self.wf_model_dict_original,\n self.variables_class_list,\n self.variables_names_list,\n variables)\n\n # Create new wf_model object and reinitialize (atmospheric conditions set later)\n self.wf_model = floris_param_change_object(self.wf_model, wf_model_dict_new)\n\n rmse = 0\n for i in range(len(self.tuning_data_power_list)):\n\n # Calculate wind turbine power outputs with model to tune\n floris_reinitialise_atmosphere(self.wf_model,\n ws=self.wind_speeds_list[i],\n wd=self.wind_directions_list[i],\n ti=self.turbulence_intensities_list[i],\n shear=self.wind_shear_list[i])\n yaw_angles = np.array([float(item) for item in self.yaw_angles_list[i]])\n power_turbines = floris_calculate_turbine_power(self.wf_model, yaw_angles)\n\n # Calculate root mean squared error single condition\n error = 0\n for j in range(len(power_turbines)):\n error += (self.tuning_data_power_list[i][j]-power_turbines[j])**2\n rmse_single = error/len(power_turbines)\n\n # Calculate sum of root mean squared errors\n rmse += rmse_single\n\n return rmse" }, { "identifier": "GPWrap", "path": "deasc/gp.py", "snippet": "class GPWrap:\n \"\"\"\n Wrapper class to create, modify and visualise Gaussian Processes for dynamic parameter\n tuning. Currently limited to a single output GP.\n \"\"\"\n\n def __init__(self, parameter_class, parameter_name, dimensions):\n self.parameter_class = parameter_class\n self.parameter_name = parameter_name\n self.dimensions = dimensions\n \"\"\"\n Args\n ----\n parameter_class: string\n Parameter class of the optimal parameter to fit.\n parameter_name: string\n Name of the optimal parameter to fit.\n dimensions: integer\n Dimensions/inputs/variables of the GP.\n \"\"\"\n\n def GP_so(self, yaw_data, param_data, num_restarts=50, noise=0.05):\n \"\"\"\n Construct and returns a single-output (SO) GP for the given input dataset\n (optimal parameter for a given yaw configuration).\n\n Args\n ----\n yaw_data: list of lists\n list of input yaw configurations for which parameter has been tuned\n param_data: list of lists\n for each yaw configuration in yaw_data, list containing the optimal parameter\n num_restarts: int\n number of random starts of the GP hyperparameter tuning optimization\n noise: float\n noise in output prediction. 
Default is 0.05\n\n Returns\n -------\n m: GPy single-output Gaussian Process model\n \"\"\"\n # Sample check on argument dimension\n if len(yaw_data[0]) != self.dimensions:\n err_msg = (\"Yaw input and GP dimensions do not match\")\n raise Exception(err_msg)\n if len(param_data[0]) != 1:\n err_msg = (\"Single-output GPs only\")\n raise Exception(err_msg)\n\n # Data structure arguments\n yaw_data_GP = np.array(yaw_data)\n param_data_GP = np.array(param_data)\n\n # GP model\n kernel = GPy.kern.RBF(input_dim=self.dimensions, variance=1., lengthscale=1.)\n self.m = GPy.models.GPRegression(yaw_data_GP,\n param_data_GP,\n kernel,\n noise_var=noise)\n\n # Hyperparameter tuning\n self.m.optimize(optimizer=None, # Default lbfgsb\n start=None,\n messages=False,\n max_iters=1000)\n self.m.optimize_restarts(num_restarts=num_restarts)\n return self.m\n\n def GP_so_plot(self, parameter_range_plot, yaw_range_plot):\n \"\"\"\n Plot a single-output (SO) GP model. 1D and 2D plots are generated for each\n variable combination.\n\n Args\n ----\n parameter_range: tuple\n range of the optimal parameter to plot\n parameter_range: tuple\n range of the yaw variables to plot\n \"\"\"\n # Plotting library choice and defaults values\n GPy.plotting.change_plotting_library('matplotlib')\n GPy.plotting.matplot_dep.defaults.data_2d = {'s': 0,\n 'edgecolors': 'none',\n 'linewidth': 0.0,\n 'cmap': cm.get_cmap('hot'),\n 'alpha': 0.5}\n\n # 1D Plots\n if self.dimensions == 1:\n figure = GPy.plotting.plotting_library().figure(1, 1, figsize=(5, 2.5))\n title = 'GP %s' % (self.parameter_name)\n xlabel = '$\\gamma_{1}$ [deg]'\n ylabel = '$%s_{opt}$' % (self.parameter_name)\n fig = self.m.plot(figure=figure,\n col=1,\n row=1,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n ylim=list(parameter_range_plot),\n legend=False,\n plot_data=True)\n else:\n n_cuts = 3\n slices = np.linspace(yaw_range_plot[0], yaw_range_plot[1], n_cuts)\n figsize = (5*n_cuts, 2.5*self.dimensions)\n figure = GPy.plotting.plotting_library().figure(self.dimensions,\n n_cuts,\n figsize=figsize)\n\n for dim_idx in range(self.dimensions):\n for i, slice_single in zip(range(n_cuts), slices):\n title = \"GP %s - $\\gamma_{others}$\" \\\n \"%.1f $^{\\circ}$\" % (self.parameter_name, slice_single)\n xlabel = '$\\gamma_{%i}$ [deg]' % (dim_idx+1)\n ylabel = '$%s_{opt}$' % (self.parameter_name)\n inputs = []\n for j in range(self.dimensions):\n if j == dim_idx:\n pass\n else:\n inputs.append((j, slice_single))\n fig = self.m.plot(figure=figure,\n col=(i+1),\n row=(dim_idx+1),\n fixed_inputs=inputs,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n ylim=list(parameter_range_plot),\n legend=False,\n plot_data=False)\n\n # 2D Plots\n # Countours are fine ##\n # Data points (training) plotted are off ##\n # double checked with GP and training database ##\n if self.dimensions == 1:\n pass\n elif self.dimensions == 2:\n figure = GPy.plotting.plotting_library().figure(1, 1, figsize=(3, 2.5))\n\n title = 'GP %s' % (self.parameter_name)\n xlabel = '$\\gamma_{1}$ [deg]'\n ylabel = '$\\gamma_{2}$ [deg]'\n\n fig = self.m.plot(figure=figure,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n legend=False,\n plot_data=True)\n\n ax = plt.gca()\n mappable = ax.collections[0]\n cbar = plt.colorbar(mappable)\n # cbar.set_label('$%s_{opt}$'%(self.parameter_name))\n else:\n n_cuts = 3\n slices = np.linspace(yaw_range_plot[0], yaw_range_plot[1], n_cuts)\n plot_rows = self.dimensions-1\n plot_cols = self.dimensions-1\n combinations = list(itertools.combinations(\n list(range(0, 
self.dimensions)), 2))\n\n figsize = (3*plot_cols*len(slices), 2.5*plot_rows)\n figure = GPy.plotting.plotting_library().figure(plot_rows,\n plot_cols*len(slices),\n figsize=figsize)\n for i, slice_single in zip(range(n_cuts), slices):\n for comb_idx, comb in enumerate(combinations):\n title = 'GP %s - $\\gamma_{others}$' \\\n '%.1f $^{\\circ}$' % (self.parameter_name, slice_single)\n xlabel = '$\\gamma_{%i}$ [deg]' % (comb[0]+1)\n ylabel = '$\\gamma_{%i}$ [deg]' % (comb[1]+1)\n inputs = []\n for j in range(self.dimensions):\n if j in comb:\n pass\n else:\n inputs.append((j, slice_single))\n\n fig = self.m.plot(figure=figure,\n col=(comb[0]+1+plot_cols*i),\n row=(comb[1]),\n fixed_inputs=inputs,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n legend=False,\n plot_data=True)\n\n ax = plt.gca()\n mappable = ax.collections[0]\n cbar = plt.colorbar(mappable)\n # cbar.set_label('$%s_{opt}$'%(self.parameter_name))" }, { "identifier": "TuningDyn_Grouping", "path": "deasc/tuning_dynamic.py", "snippet": "class TuningDyn_Grouping(TuningDyn, TuningDyn_SharedMethods):\n \"\"\"Class for dynamic parameter tuning with grouping of turbines within a wind farm.\"\"\"\n\n def __init__(self, param_class, param_name, tuning_groups, GP_model):\n \"\"\"\n Args\n ----\n param_class: (string) tuning parameter class.\n param_name: (string) tuning parameter name.\n tuning_groups: (list of lists) list of turbine groups included in the tuning. In\n each list, specify the turbines in the group.\n GP_model: (GPy object) GP model with len(tuning_groups) input dimensions.\n \"\"\"\n super().__init__(param_class, param_name)\n # Tuning info\n self.tuning_variables = tuning_groups\n self.tuning_dimensions = len(self.tuning_variables)\n self.GP_model = GP_model\n # GP dimension check\n self._GP_dimension_check(self.tuning_dimensions, self.GP_model)\n # Grouping info\n self.tuning_groups = tuning_groups\n self.grouping_bool = True\n\n @property\n def tuning_turbines(self):\n \"\"\"List of the tuning turbines in the wind farm.\"\"\"\n return [x for sublist in self.tuning_variables for x in sublist]\n\n def wso_compatibility_check(self, wso_obj):\n \"\"\"\n Check compatibility with a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object to which dynamic parameter tuning is added.\n \"\"\"\n self._tuning_turbines_check(wso_obj, self.tuning_turbines)\n self._tuning_groups_check(wso_obj)\n\n def tune_parameter(self, wso_obj, yaw_angles):\n \"\"\"\n Perform parameter tuning in a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object.\n yaw_angles: (np.ndarray) yaw angles of all turbines in the wind farm.\n\n Returns\n -------\n wf-model_tuned: (WfModel) tuned WfModel to use in the current iteration of the\n wake steering optimisation.\n \"\"\"\n # Extract WSOpt WfModel dictionary\n wf_model_dict = floris_extract_object_dict(wso_obj.wf_model)\n\n # Create and apply tuned WfModel dictionary\n GP_input = self._get_GP_input_groups(self.tuning_groups, yaw_angles)\n mu, var, = self.GP_model.predict_noiseless(np.array([GP_input]))\n optimal_parameter = mu[0][0]\n wf_model_dict_tuned = floris_param_change_object_dict(wf_model_dict,\n self.param_class,\n self.param_name,\n optimal_parameter)\n wf_model_tuned = floris_param_change_object(wso_obj.wf_model,\n wf_model_dict_tuned)\n return wf_model_tuned\n\n def set_yaw_groups(self, yaw_angles):\n \"\"\"\n Force yaw angles of turbines in tuning groups to be equal in the wake\n steering optimisation.\n\n Args\n ----\n yaw_angles: (np.ndarray) yaw angles of all turbines in the 
wind farm.\n\n Returns\n -------\n yaw_angles_grouped: (np.ndarray) yaw angles of all turbines in the wind farm with\n equal yaw angles in each turbine group.\n \"\"\"\n return self._set_yaw_groups(yaw_angles)" }, { "identifier": "TuningDyn_Looping_Turbine", "path": "deasc/tuning_dynamic.py", "snippet": "class TuningDyn_Looping_Turbine(TuningDyn, TuningDyn_SharedMethods):\n \"\"\"\n Class for dynamic parameter tuning with the looping approach of turbines within\n a wind farm.\n \"\"\"\n\n def __init__(self, param_class, param_name, tuning_turbine, GP_model, wf_pow_noyaw):\n \"\"\"\n Args\n ----\n param_class: (string) tuning parameter class.\n param_name: (string) tuning parameter name.\n tuning_turbines: (list) list of single turbine included in the tuning.\n GP_model: (GPy object) GP model with a single input dimension.\n wf_pow_noyaw: (float) value of the wind farm power without any yaw applied,\n usually extracted from the previous grouping optimisation to refine.\n \"\"\"\n super().__init__(param_class, param_name)\n # Tuning info\n self.tuning_variables = tuning_turbine\n self.tuning_dimensions = len(self.tuning_variables)\n self.GP_model = GP_model\n self._GP_dimension_check(self.tuning_dimensions, self.GP_model)\n # Looping info\n self.wf_pow_noyaw = wf_pow_noyaw\n self.tuning_bool = True\n\n @property\n def tuning_turbines(self):\n \"\"\"List of the tuning turbines in the wind farm.\"\"\"\n return self.tuning_variables\n\n def wso_compatibility_check(self, wso_obj):\n \"\"\"\n Check compatibility with a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object to which dynamic parameter tuning is added.\n \"\"\"\n self._tuning_turbines_check(wso_obj, self.tuning_turbines)\n self._looping_check(wso_obj)\n\n def tune_parameter(self, wso_obj, yaw_angles):\n \"\"\"\n Perform parameter tuning in a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object.\n yaw_angles: (np.ndarray) yaw angles of all turbines in the wind farm.\n\n Returns\n -------\n wf-model_tuned: (WfModel) tuned WfModel to use in the current iteration of the\n wake steering optimisation.\n \"\"\"\n # Extract WSOpt WfModel dictionary\n wf_model_dict = floris_extract_object_dict(wso_obj.wf_model)\n\n # Create and apply tuned WfModel dictionary\n GP_input = self._get_GP_input_turbines(self.tuning_turbines, yaw_angles)\n mu, var, = self.GP_model.predict_noiseless(np.array([GP_input]))\n optimal_parameter = mu[0][0]\n wf_model_dict_tuned = floris_param_change_object_dict(wf_model_dict,\n self.param_class,\n self.param_name,\n optimal_parameter)\n wf_model_tuned = floris_param_change_object(wso_obj.wf_model,\n wf_model_dict_tuned)\n return wf_model_tuned\n\n def _looping_check(self, wso_obj):\n if len(self.tuning_variables) != 1:\n err_msg = \"While looping, only a single turbine can be tuned.\"\n raise Exception(err_msg)\n if len(wso_obj.variables) != 1:\n err_msg = \"While looping, only a single turbine can be optimised.\"\n raise Exception(err_msg)" }, { "identifier": "floris_extract_object_dict", "path": "deasc/utils_floris.py", "snippet": "def floris_extract_object_dict(wf_model):\n \"\"\"Extract and return the current FLORIS object dictionary.\"\"\"\n return wf_model.interface.floris.as_dict()" }, { "identifier": "floris_extract_parameter", "path": "deasc/utils_floris.py", "snippet": "def floris_extract_parameter(wf_model_dict, param_class, param_name):\n \"\"\"Extract and return the current parameter value of a FLORIS object parameter.\"\"\"\n models_dict = floris_extract_models_dict(wf_model_dict)\n return 
wf_model_dict['wake'][param_class][models_dict[param_class]][param_name]" }, { "identifier": "floris_param_change_object_dict", "path": "deasc/utils_floris.py", "snippet": "def floris_param_change_object_dict(wf_model_dict, param_class, param_name, param_value):\n \"\"\"\n Change FLORIS object with a new model parameter, return new FLORIS object dictionary.\n FLORIS object is not reinitialised (see function floris_parameter_change_object).\n \"\"\"\n wf_model_dict_new = copy.deepcopy(wf_model_dict)\n models_dict = floris_extract_models_dict(wf_model_dict_new)\n (wf_model_dict_new['wake'][param_class]\n [models_dict[param_class]][param_name]) = param_value\n return wf_model_dict_new" }, { "identifier": "floris_param_change_object", "path": "deasc/utils_floris.py", "snippet": "def floris_param_change_object(wf_model, wf_model_dict_new):\n \"\"\"Change FLORIS object with new object dictionary. Also reinitialise farm layout.\"\"\"\n x_reinit, y_reinit = wf_model.interface.get_turbine_layout()\n wf_model.interface = FI(wf_model_dict_new)\n wf_model.interface.reinitialize(layout_x=x_reinit, layout_y=y_reinit)\n return wf_model" } ]
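The WSOpt and Tuning snippets above repeatedly call norm and unnorm helpers whose definitions are not part of the context. A minimal sketch, assuming standard min-max scaling; the helper bodies are our assumption, only the call signature norm(value, low_bound, upp_bound) comes from the snippets:

import numpy as np

def norm(value, low_bound, upp_bound):
    # Assumed behaviour: map a value from [low_bound, upp_bound] to [0, 1].
    return (np.asarray(value, dtype=float) - low_bound) / (upp_bound - low_bound)

def unnorm(value_norm, low_bound, upp_bound):
    # Assumed inverse mapping from [0, 1] back to the original bounds.
    return np.asarray(value_norm, dtype=float) * (upp_bound - low_bound) + low_bound

print(norm(10.0, -25, 25))    # 0.7
print(unnorm(0.7, -25, 25))   # 10.0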
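For reference, the objective minimised by Tuning._tuning_rmse_function in the snippet above is the per-condition mean squared turbine-power error summed over all tuning conditions (no square root is applied, despite the 'RMSE' label). A standalone restatement of that computation:

def tuning_objective(data_power_list, model_power_list):
    # Sum over conditions of the mean squared per-turbine power error,
    # mirroring Tuning._tuning_rmse_function in the snippet above.
    total = 0.0
    for data_powers, model_powers in zip(data_power_list, model_power_list):
        error = sum((d - m) ** 2 for d, m in zip(data_powers, model_powers))
        total += error / len(model_powers)
    return total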
import numpy as np from deasc import WfModel from deasc import WSOpt from deasc import Tuning from deasc import GPWrap from deasc import TuningDyn_Grouping from deasc import TuningDyn_Looping_Turbine from deasc.utils_floris import ( floris_extract_object_dict, floris_extract_parameter, floris_param_change_object_dict, floris_param_change_object )
16606
print(opt_yaw_angles_all) # Extract wind farm power without any yaw wf_pow_noyaw = wso_obj_tuning.wf_pow_noyaw # %% Looping refinement yaw_initial = opt_yaw_angles_all # Number of loops for each turbine n_iterations = 1 # One loop for each turbine variable for turbine in [1, 2, 3, 4]*n_iterations: # Wake steering optimisation inputs - single turbine inflow = (yaw_initial, wd, ws, ti, shear) variables = [turbine] var_initial = [yaw_initial[turbine-1]] # %% Looping GP dataset # Higher fidelity dataset # Initialise trainer and set farm layout path = "./inputs/" input_file_trainer = "gch.yaml" trainer = WfModel(input_file_trainer, path) trainer.set_aligned_layout(5, 1, 7, 5) # Define training set yaw_list = [] for yaw_var in np.linspace(-25, 25, 7): yaw_single = yaw_initial.copy() yaw_single[turbine-1] = yaw_var yaw_list.append(yaw_single) # Produce high-fidelity power measurement for each training condition wt_pow_training_list = [] for i in range(len(yaw_list)): _, wt_pow_training, _, _ = trainer.farm_eval(yaw=yaw_list[i], wd=wd, ws=ws, ti=ti, shear=shear) wt_pow_training_list.append(wt_pow_training) # Parameter tuning - Run a single optimisation for each training condition # Initialise dataset optimal_parameter_dataset = {} for i, yaw in enumerate(yaw_list): # Initialise trainee trainee = wf_model # Parameters to tune param_class_list = ['wake_velocity_parameters'] param_name_list = ['we'] param_bounds_list = [(0.0, 0.1)] # TURBO options TURBO_opt = {"n_init": 2, "max_evals": 100, "batch_size": 4, # 1 = Serial "verbose": True, "use_ard": True, "max_cholesky_size": 2000, "n_training_steps": 50, "min_cuda": 1024, "device": "cpu", "dtype": "float64"} # Initialise parameter tuning object tune_obj = Tuning(wf_model=trainee, variables_class_list=param_class_list, variables_names_list=param_name_list, variables_bounds_list=param_bounds_list, obj_func_name='RMSE', opt_method='TURBO_1', opt_options=TURBO_opt) # Specify higher-fidelity tuning condition tune_obj.tuning_conditions(yaw_angles_list=[yaw], wind_directions_list=[wd], wind_speeds_list=[ws], turbulence_intensities_list=[ti], wind_shear_list=[shear]) # Specify higher-fidelity turbine power measurements tune_obj.tuning_data(data_power_list=[wt_pow_training_list[i]]) # Tune parameters, extract tuned dictionary, reinitialise wf_model object trainee, trainee_dict_opt = tune_obj.tune_parameters() # Extract tuned k parameter k_tuned = floris_extract_parameter(trainee_dict_opt, param_class_list[0], param_name_list[0]) # Add yaw combination and optimal parameter to dataset optimal_parameter_dataset[tuple(yaw)] = k_tuned # %% Looping wso optimisation # Extract GP input yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append([key[turbine-1]]) param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=1) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=50, noise=0.05) # Tuning object initialisation
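In the looping section of the prompt above, each Gaussian Process is one-dimensional: yaw_data holds the looped turbine's yaw for each of the 7 training conditions and param_data the corresponding tuned we value. A minimal sketch of the resulting shapes, mirroring GPWrap.GP_so from the context with dimensions=1; the output values below are illustrative placeholders, not tuned results:

import numpy as np
import GPy

# 7 training yaw set-points for the looped turbine, as in the script.
X = np.linspace(-25, 25, 7).reshape(-1, 1)                    # (7, 1) GP input
# Placeholder tuned "we" values; in the script these come from the TURBO_1 runs.
Y = np.array([[0.040], [0.045], [0.050], [0.055], [0.050], [0.045], [0.040]])  # (7, 1) GP output

kernel = GPy.kern.RBF(input_dim=1, variance=1.0, lengthscale=1.0)
m = GPy.models.GPRegression(X, Y, kernel, noise_var=0.05)
m.optimize_restarts(num_restarts=50, verbose=False)

mu, var = m.predict_noiseless(np.array([[10.0]]))             # predicted "we" at 10 deg yaw
print(mu[0][0])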
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with the looping approach is implemented to refine the results achieved with grouping. Tuning is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles of all wind turbines in the farm, excluding the most downstream one. """ # %% Initial wake steering optimisation - Grouping approach for dynamic parameter tuning # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3) wf_model = floris_param_change_object(wf_model, wf_model_dict) # Specify atmopheric conditions ws = 8.0 wd = 270 ti = 0.05 shear = 0.0 # Wake steering optimisation inputs yaw_initial = np.full(shape=(5), fill_value=0) inflow = (yaw_initial, wd, ws, ti, shear) variables = [1, 2, 3, 4] var_bounds = (-25, 25) var_initial = np.full(shape=(len(variables)), fill_value=0) # Dynamic tuning object # Parameter info parameter_class = 'wake_velocity_parameters' parameter_name = 'we' # Import optimal parameter dataset and extract GP input dataset_path = "./optimal_parameter_datasets/" dataset_import = np.load(dataset_path+'we_5x1_2dim_grouping.npy', allow_pickle=True) optimal_parameter_dataset = dataset_import.item() yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append([key[0], key[2]]) # Extract group yaw param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=2) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=100, noise=0.05) # Tuning object initialisation tuning_dyn_obj = TuningDyn_Grouping(param_class=parameter_class, param_name=parameter_name, tuning_groups=[[1, 2], [3, 4]], GP_model=GP_model) # Optimisation with dynamic tuning # Initialise wake steering object wso_obj_tuning = WSOpt(wf_model=wf_model, inflow=inflow, variables=variables, var_bounds=var_bounds, var_initial=var_initial, opt_method="SLSQP", opt_options=None, obj_function="Farm Power", tuning_dynamic=True ) # Assign dynamic tuning to wake steering optimisation wso_obj_tuning.tuning_dyn_initialize([tuning_dyn_obj]) # Optimise and print yaw angles opt_yaw_angles_vars, opt_yaw_angles_all = wso_obj_tuning.optimize_yaw() print('Optimal farm yaw angles with dynamic parameter tuning:') print(opt_yaw_angles_all) # Extract wind farm power without any yaw wf_pow_noyaw = wso_obj_tuning.wf_pow_noyaw # %% Looping refinement yaw_initial = opt_yaw_angles_all # Number of loops for each turbine n_iterations = 1 # One loop for each turbine variable for turbine in [1, 2, 3, 4]*n_iterations: # Wake steering optimisation inputs - single turbine inflow = (yaw_initial, wd, ws, ti, shear) variables = [turbine] var_initial = [yaw_initial[turbine-1]] # %% Looping GP dataset # Higher fidelity dataset # Initialise trainer and set farm layout path = "./inputs/" input_file_trainer = "gch.yaml" trainer = WfModel(input_file_trainer, path) trainer.set_aligned_layout(5, 1, 7, 5) # Define training set yaw_list = [] for yaw_var in np.linspace(-25, 25, 7): yaw_single = yaw_initial.copy() yaw_single[turbine-1] = yaw_var yaw_list.append(yaw_single) # 
Produce high-fidelity power measurement for each training condition wt_pow_training_list = [] for i in range(len(yaw_list)): _, wt_pow_training, _, _ = trainer.farm_eval(yaw=yaw_list[i], wd=wd, ws=ws, ti=ti, shear=shear) wt_pow_training_list.append(wt_pow_training) # Parameter tuning - Run a single optimisation for each training condition # Initialise dataset optimal_parameter_dataset = {} for i, yaw in enumerate(yaw_list): # Initialise trainee trainee = wf_model # Parameters to tune param_class_list = ['wake_velocity_parameters'] param_name_list = ['we'] param_bounds_list = [(0.0, 0.1)] # TURBO options TURBO_opt = {"n_init": 2, "max_evals": 100, "batch_size": 4, # 1 = Serial "verbose": True, "use_ard": True, "max_cholesky_size": 2000, "n_training_steps": 50, "min_cuda": 1024, "device": "cpu", "dtype": "float64"} # Initialise parameter tuning object tune_obj = Tuning(wf_model=trainee, variables_class_list=param_class_list, variables_names_list=param_name_list, variables_bounds_list=param_bounds_list, obj_func_name='RMSE', opt_method='TURBO_1', opt_options=TURBO_opt) # Specify higher-fidelity tuning condition tune_obj.tuning_conditions(yaw_angles_list=[yaw], wind_directions_list=[wd], wind_speeds_list=[ws], turbulence_intensities_list=[ti], wind_shear_list=[shear]) # Specify higher-fidelity turbine power measurements tune_obj.tuning_data(data_power_list=[wt_pow_training_list[i]]) # Tune parameters, extract tuned dictionary, reinitialise wf_model object trainee, trainee_dict_opt = tune_obj.tune_parameters() # Extract tuned k parameter k_tuned = floris_extract_parameter(trainee_dict_opt, param_class_list[0], param_name_list[0]) # Add yaw combination and optimal parameter to dataset optimal_parameter_dataset[tuple(yaw)] = k_tuned # %% Looping wso optimisation # Extract GP input yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append([key[turbine-1]]) param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=1) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=50, noise=0.05) # Tuning object initialisation
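In the grouping section of the full script above, yaw_data.append([key[0], key[2]]) keeps one representative yaw per tuning group: with tuning_groups=[[1, 2], [3, 4]] the turbines in a group share the same yaw, so the yaw of turbine 1 (index 0) and turbine 3 (index 2) fully describe a training point. A small sketch of that reduction; the helper name and the example yaw combination are ours, while the group definitions come from the script:

def group_representative_yaws(yaw_key, tuning_groups):
    # One yaw per group, taken from the group's first turbine
    # (turbine numbering starts at 1, hence the -1 when indexing).
    return [yaw_key[group[0] - 1] for group in tuning_groups]

yaw_key = (20.0, 20.0, 10.0, 10.0, 0.0)                       # illustrative 5x1 farm yaw combination
print(group_representative_yaws(yaw_key, [[1, 2], [3, 4]]))   # [20.0, 10.0]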
tuning_dyn_obj = TuningDyn_Looping_Turbine(param_class=parameter_class,
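The next_line above opens the TuningDyn_Looping_Turbine constructor. Based on the class signature given in the context (param_class, param_name, tuning_turbine, GP_model, wf_pow_noyaw) and the variables defined in the script, a plausible completion of that call is sketched below; this is a hedged reconstruction for illustration, not the dataset's ground-truth continuation:

tuning_dyn_obj = TuningDyn_Looping_Turbine(param_class=parameter_class,
                                           param_name=parameter_name,
                                           tuning_turbine=[turbine],
                                           GP_model=GP_model,
                                           wf_pow_noyaw=wf_pow_noyaw)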
5
2023-11-10 18:13:27+00:00
24k
PlaxtonFlarion/NexaFlow
nexaflow/skills/alynex.py
[ { "identifier": "toolbox", "path": "nexaflow/toolbox.py", "snippet": "def video_capture(video_path: str):\ndef video_jump(video_cap: cv2.VideoCapture, frame_id: int):\ndef compare_ssim(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef multi_compare_ssim(\n pic1_list: typing.List, pic2_list: typing.List, hooks: typing.List = None\n) -> typing.List[float]:\ndef get_current_frame_id(video_cap: cv2.VideoCapture) -> int:\ndef get_current_frame_time(video_cap: cv2.VideoCapture) -> float:\ndef imread(img_path: str, *_, **__) -> np.ndarray:\ndef get_frame_time(\n video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None\n) -> float:\ndef get_frame_count(video_cap: cv2.VideoCapture) -> int:\ndef get_frame_size(video_cap: cv2.VideoCapture) -> typing.Tuple[int, int]:\ndef get_frame(\n video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None\n) -> np.ndarray:\ndef turn_grey(old: np.ndarray) -> np.ndarray:\ndef turn_binary(old: np.ndarray) -> np.ndarray:\ndef turn_hog_desc(old: np.ndarray) -> np.ndarray:\ndef turn_lbp_desc(old: np.ndarray, radius: int = None) -> np.ndarray:\ndef turn_blur(old: np.ndarray) -> np.ndarray:\ndef sharpen_frame(old: np.ndarray) -> np.ndarray:\ndef calc_mse(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef calc_psnr(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef compress_frame(\n old: np.ndarray,\n compress_rate: float = None,\n target_size: typing.Tuple[int, int] = None,\n not_grey: bool = None,\n interpolation: int = None,\n *_,\n **__,\n) -> np.ndarray:\ndef get_timestamp_str() -> str:\ndef np2b64str(frame: np.ndarray) -> str:\ndef fps_convert(\n target_fps: int, source_path: str, target_path: str, ffmpeg_exe: str = None\n) -> int:\ndef match_template_with_object(\n template: np.ndarray,\n target: np.ndarray,\n engine_template_cv_method_name: str = None,\n **kwargs,\n) -> typing.Dict[str, typing.Any]:\ndef match_template_with_path(\n template: str, target: np.ndarray, **kwargs\n) -> typing.Dict[str, typing.Any]:\ndef show_progress(total: int, color: int, title: str) -> tqdm:\ndef draw_line(image_path: str, save_path: str = None):" }, { "identifier": "Report", "path": "nexaflow/skills/report.py", "snippet": "class Report(object):\n\n __lock: threading.Lock = threading.Lock()\n __initialized: bool = False\n __instance = None\n __init_var = None\n\n def __new__(cls, *args, **kwargs):\n if cls.__instance is None:\n with cls.__lock:\n if cls.__instance is None:\n cls.__instance = super(Report, cls).__new__(cls)\n cls.__init_var = (args, kwargs)\n return cls.__instance\n\n def __init__(self, total_path: str):\n if not self.__initialized:\n self.__initialized = True\n\n self.clock: Any = lambda: time.strftime(\"%Y%m%d%H%M%S\")\n\n self.__title: str = \"\"\n self.__query: str = \"\"\n self.query_path: str = \"\"\n self.video_path: str = \"\"\n self.frame_path: str = \"\"\n self.extra_path: str = \"\"\n\n self.range_list: list[dict] = []\n self.total_list: list[dict] = []\n\n self.total_path = os.path.join(total_path, f\"Nexa_{self.clock()}_{os.getpid()}\", \"Nexa_Collection\")\n # self.total_path = \"/Users/acekeppel/PycharmProjects/NexaFlow/report/Nexa_20230822223025/Nexa_Collection\"\n os.makedirs(self.total_path, exist_ok=True)\n\n self.reset_path = os.path.join(os.path.dirname(self.total_path), \"Nexa_Recovery\")\n os.makedirs(self.reset_path, exist_ok=True)\n log_papers = os.path.join(self.reset_path, \"nexaflow.log\")\n logger.add(log_papers, format=FORMAT, level=\"DEBUG\")\n\n @property\n def proto_path(self) -> str:\n return 
os.path.join(self.query_path, self.query)\n\n @property\n def title(self):\n return self.__title\n\n @title.setter\n def title(self, title: str):\n self.__title = title\n self.query_path = os.path.join(self.total_path, self.title)\n os.makedirs(self.query_path, exist_ok=True)\n logger.info(f\"✪✪✪✪✪✪✪✪✪✪ {self.title} ✪✪✪✪✪✪✪✪✪✪\\n\")\n\n @title.deleter\n def title(self):\n del self.__title\n\n @property\n def query(self):\n return self.__query\n\n @query.setter\n def query(self, query: str):\n self.__query = query\n self.video_path = os.path.join(self.query_path, self.query, \"video\")\n self.frame_path = os.path.join(self.query_path, self.query, \"frame\")\n self.extra_path = os.path.join(self.query_path, self.query, \"extra\")\n os.makedirs(self.video_path, exist_ok=True)\n os.makedirs(self.frame_path, exist_ok=True)\n os.makedirs(self.extra_path, exist_ok=True)\n logger.info(f\"Start -> {self.query}\")\n\n @query.deleter\n def query(self):\n del self.__query\n\n def load(self, inform: Optional[Dict[str, Union[str | Dict]]]) -> None:\n if inform:\n self.range_list.append(inform)\n logger.info(f\"End -> {self.query}\\n\")\n\n def create_report(self) -> None:\n\n def start_create(result):\n handler_list = []\n query = result.get(\"query\", \"TimeCost\")\n stage = result.get(\"stage\", {\"start\": 1, \"end\": 2, \"cost\": \"0.00000\"})\n frame = result.get(\"frame\", \"\")\n extra = result.get(\"extra\", \"\")\n proto = result.get(\"proto\", \"\")\n\n image_list = []\n for image in os.listdir(frame):\n image_src = os.path.join(query, \"frame\", image)\n image_ids = re.search(r\"\\d+(?=_)\", image).group()\n timestamp = float(re.search(r\"(?<=_).+(?=\\.)\", image).group())\n image_list.append(\n {\n \"src\": image_src,\n \"frames_id\": image_ids,\n \"timestamp\": f\"{timestamp:.5f}\"\n }\n )\n image_list.sort(key=lambda x: int(x[\"frames_id\"]))\n\n extra_list = []\n for ex in os.listdir(extra):\n extra_src = os.path.join(query, \"extra\", ex)\n extra_idx = ex.split(\"(\")[0]\n extra_list.append(\n {\n \"src\": extra_src,\n \"idx\": extra_idx\n }\n )\n extra_list.sort(key=lambda x: int(x[\"idx\"].split(\"(\")[0]))\n\n handler_list.append(\n {\n \"query\": query,\n \"stage\": stage,\n \"image_list\": image_list,\n \"extra_list\": extra_list,\n \"proto\": os.path.join(query, os.path.basename(proto))\n }\n )\n\n return handler_list\n\n if len(self.range_list) > 0:\n if len(self.range_list) == 1:\n images_list = start_create(self.range_list[0])\n else:\n with ThreadPoolExecutor() as executor:\n future = executor.map(start_create, self.range_list)\n images_list = [i for f in future for i in f]\n\n loader = FileSystemLoader(os.path.join(Constants.NEXA, \"template\"))\n environment = Environment(loader=loader)\n template = environment.get_template(\"template_main.html\")\n\n html = template.render(title=self.title, images_list=images_list)\n report_html = os.path.join(self.query_path, f\"{self.title}.html\")\n with open(file=report_html, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成聚合报告: {os.path.basename(report_html)}\")\n\n cost_list = [cost['stage']['cost'] for cost in images_list]\n href_path = os.path.join(\n os.path.basename(self.total_path),\n self.title,\n os.path.basename(report_html)\n )\n single = {\n \"case\": self.title,\n \"cost_list\": cost_list,\n \"avg\": f\"{sum(map(float, cost_list)) / len(cost_list):.5f}\",\n \"href\": href_path\n }\n logger.debug(\"Recovery: \" + json.dumps(single, ensure_ascii=False))\n self.total_list.append(single)\n 
self.range_list.clear()\n else:\n logger.info(\"没有可以聚合的报告 ...\")\n\n logger.info(f\"✪✪✪✪✪✪✪✪✪✪ {self.title} ✪✪✪✪✪✪✪✪✪✪\\n\\n\")\n\n def create_total_report(self) -> None:\n if len(self.total_list) > 0:\n loader = FileSystemLoader(os.path.join(Constants.NEXA, \"template\"))\n environment = Environment(loader=loader)\n template = environment.get_template(\"template_information.html\")\n report_time = time.strftime('%Y.%m.%d %H:%M:%S')\n html = template.render(report_time=report_time, total_list=self.total_list)\n\n total_html_path = os.path.join(os.path.dirname(self.total_path), \"NexaFlow.html\")\n with open(file=total_html_path, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成汇总报告: {total_html_path}\\n\\n\")\n self.total_list.clear()\n else:\n logger.info(\"没有可以汇总的报告 ...\")\n\n @staticmethod\n def reset_report(file_name: str) -> None:\n loader = FileSystemLoader(os.path.join(Constants.NEXA, \"template\"))\n environment = Environment(loader=loader)\n template = environment.get_template(\"template_information.html\")\n report_time = time.strftime('%Y.%m.%d %H:%M:%S')\n\n with open(\n file=os.path.join(file_name, \"Nexa_Recovery\", \"nexaflow.log\"),\n mode=\"r\", encoding=\"utf-8\"\n ) as f:\n log_restore = re.findall(r\"(?<=Recovery: ).*}\", f.read())\n total_list = [json.loads(file) for file in log_restore]\n html = template.render(report_time=report_time, total_list=total_list)\n\n total_html_path = os.path.join(file_name, \"NexaFlow.html\")\n with open(file=total_html_path, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成汇总报告: {total_html_path}\\n\\n\")\n\n @staticmethod\n def merge_report(merge_list: List[str], loader_merge_loc: str) -> None:\n merge_path = os.path.join(\n os.path.dirname(os.path.dirname(merge_list[0])),\n \"Merge_Nexa_\" + time.strftime(\"%Y%m%d%H%M%S\"),\n \"Nexa_Collection\"\n )\n os.makedirs(merge_path, exist_ok=True)\n log_restore = []\n for merge in merge_list:\n logs = os.path.join(os.path.dirname(merge), \"Nexa_Recovery\", \"nexaflow.log\")\n with open(file=logs, mode=\"r\", encoding=\"utf-8\") as f:\n log_restore.extend(re.findall(r\"(?<=Recovery: ).*}\", f.read()))\n shutil.copytree(\n merge, merge_path, dirs_exist_ok=True,\n ignore=shutil.ignore_patterns(\"NexaFlow.html\", \"nexaflow.log\")\n )\n\n loader = FileSystemLoader(loader_merge_loc)\n environment = Environment(loader=loader)\n template = environment.get_template(\"template_information.html\")\n report_time = time.strftime('%Y.%m.%d %H:%M:%S')\n total_list = [json.loads(file) for file in log_restore]\n html = template.render(report_time=report_time, total_list=total_list)\n\n total_html_path = os.path.join(os.path.dirname(merge_path), \"NexaFlow.html\")\n with open(file=total_html_path, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"合并汇总报告: {total_html_path}\\n\\n\")\n\n @staticmethod\n async def ask_create_report(major_loc, title, total_path, query_path, range_list):\n\n async def handler_inform(result):\n handler_list = []\n query = result.get(\"query\", \"TimeCost\")\n stage = result.get(\"stage\", {\"start\": 1, \"end\": 2, \"cost\": \"0.00000\"})\n frame = result.get(\"frame\", \"\")\n extra = result.get(\"extra\", \"\")\n proto = result.get(\"proto\", \"\")\n\n async def handler_frame():\n handler_image_list = []\n for image in os.listdir(\n os.path.join(\n query_path, query, os.path.basename(frame)\n )\n ):\n image_src = os.path.join(query, \"frame\", image)\n image_ids = re.search(r\"\\d+(?=_)\", image).group()\n 
timestamp = float(re.search(r\"(?<=_).+(?=\\.)\", image).group())\n handler_image_list.append(\n {\n \"src\": image_src,\n \"frames_id\": image_ids,\n \"timestamp\": f\"{timestamp:.5f}\"\n }\n )\n handler_image_list.sort(key=lambda x: int(x[\"frames_id\"]))\n return handler_image_list\n\n async def handler_extra():\n handler_extra_list = []\n for ex in os.listdir(\n os.path.join(\n query_path, query, os.path.basename(extra)\n )\n ):\n extra_src = os.path.join(query, \"extra\", ex)\n extra_idx = ex.split(\"(\")[0]\n handler_extra_list.append(\n {\n \"src\": extra_src,\n \"idx\": extra_idx\n }\n )\n handler_extra_list.sort(key=lambda x: int(x[\"idx\"].split(\"(\")[0]))\n return handler_extra_list\n\n image_list, extra_list = await asyncio.gather(\n handler_frame(), handler_extra()\n )\n\n handler_list.append(\n {\n \"query\": query,\n \"stage\": stage,\n \"image_list\": image_list,\n \"extra_list\": extra_list,\n \"proto\": os.path.join(query, os.path.basename(proto))\n }\n )\n return handler_list\n\n async def handler_start():\n single = {}\n if len(range_list) > 0:\n tasks = [handler_inform(result) for result in range_list]\n results = await asyncio.gather(*tasks)\n images_list = [ele for res in results for ele in res]\n\n major_loader = FileSystemLoader(major_loc)\n major_environment = Environment(loader=major_loader)\n major_template = major_environment.get_template(\"template_main.html\")\n\n html = major_template.render(title=title, images_list=images_list)\n report_html = os.path.join(query_path, f\"{title}.html\")\n with open(file=report_html, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成聚合报告: {os.path.basename(report_html)}\")\n\n cost_list = [cost['stage']['cost'] for cost in images_list]\n href_path = os.path.join(\n os.path.basename(total_path),\n title,\n os.path.basename(report_html)\n )\n single = {\n \"case\": title,\n \"cost_list\": cost_list,\n \"avg\": f\"{sum(map(float, cost_list)) / len(cost_list):.5f}\",\n \"href\": href_path\n }\n logger.debug(\"Recovery: \" + json.dumps(single, ensure_ascii=False))\n else:\n logger.info(\"没有可以聚合的报告 ...\")\n\n logger.info(f\"✪✪✪✪✪✪✪✪✪✪ {title} ✪✪✪✪✪✪✪✪✪✪\\n\\n\")\n return single\n\n return await handler_start()\n\n @staticmethod\n async def ask_create_total_report(file_name: str, major_loc: str, loader_total_loc: str):\n report_time = time.strftime('%Y.%m.%d %H:%M:%S')\n try:\n with open(file=os.path.join(file_name, \"Nexa_Recovery\", \"nexaflow.log\"), mode=\"r\", encoding=\"utf-8\") as f:\n open_file = f.read()\n except FileNotFoundError as e:\n return e\n else:\n match_list = re.findall(r\"(?<=Restore: ).*}\", open_file)\n range_list = [json.loads(file.replace(\"'\", '\"')) for file in match_list if file]\n grouped_dict = defaultdict(list)\n for part in range_list:\n parts = part.pop(\"title\"), part.pop(\"total_path\"), part.pop(\"query_path\")\n grouped_dict[parts].append(part)\n\n tasks = [\n Report.ask_create_report(\n major_loc,\n title,\n os.path.join(file_name, os.path.basename(total_path)),\n os.path.join(file_name, os.path.basename(total_path), title),\n range_list\n )\n for (title, total_path, query_path), range_list in grouped_dict.items()\n ]\n merge_result = await asyncio.gather(*tasks)\n total_list = [merge for merge in merge_result]\n\n if len(total_list) > 0:\n total_loader = FileSystemLoader(loader_total_loc)\n total_environment = Environment(loader=total_loader)\n total_template = total_environment.get_template(\"template_information.html\")\n\n html = 
total_template.render(report_time=report_time, total_list=total_list)\n total_html = os.path.join(file_name, \"NexaFlow.html\")\n with open(file=total_html, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成汇总报告: {total_html}\")\n else:\n logger.info(\"没有可以汇总的报告 ...\")\n\n @staticmethod\n def draw(\n classifier_result,\n proto_path: str,\n compress_rate: float = None,\n target_size: Tuple[int, int] = None,\n boost_mode: bool = False,\n framix_template: str = None\n ) -> str:\n\n label_stable: str = \"稳定阶段\"\n label_unstable: str = \"不稳定阶段\"\n label_unspecific: str = \"不明阶段\"\n\n thumbnail_list: List[Dict[str, str]] = list()\n extra_dict: Dict[str, str] = dict()\n\n if not compress_rate:\n compress_rate = 0.2\n\n try:\n stage_range = classifier_result.get_stage_range()\n except AssertionError:\n stage_range = [classifier_result.data]\n\n if boost_mode:\n for cur_index in range(len(stage_range)):\n each = stage_range[cur_index]\n middle = each[len(each) // 2]\n image_list = []\n if middle.is_stable():\n label = label_stable\n image = toolbox.compress_frame(\n middle.get_data(), compress_rate=compress_rate, target_size=target_size\n )\n frame = {\n \"frame_id\": middle.frame_id,\n \"timestamp\": f\"{middle.timestamp:.5f}\",\n \"image\": toolbox.np2b64str(image)\n }\n image_list.append(frame)\n else:\n if middle.stage == constants.UNKNOWN_STAGE_FLAG:\n label = label_unspecific\n else:\n label = label_unstable\n\n if cur_index + 1 < len(stage_range):\n new_each = [*each, stage_range[cur_index + 1][0]]\n else:\n new_each = each\n\n for i in new_each:\n image = toolbox.compress_frame(\n i.get_data(), compress_rate=compress_rate, target_size=target_size\n )\n frame = {\n \"frame_id\": i.frame_id,\n \"timestamp\": f\"{i.timestamp:.5f}\",\n \"image\": toolbox.np2b64str(image)\n }\n image_list.append(frame)\n\n first, last = each[0], each[-1]\n title = (f\"{label} \"\n f\"区间: {first.frame_id}({first.timestamp:.5f}) - {last.frame_id}({last.timestamp:.5f}) \"\n f\"耗时: {last.timestamp - first.timestamp:.5f} \"\n f\"分类: {first.stage}\")\n thumbnail_list.append({title: image_list})\n else:\n for cur_index in range(len(stage_range)):\n each_range = stage_range[cur_index]\n middle = each_range[len(each_range) // 2]\n\n if middle.is_stable():\n label = label_stable\n elif middle.stage == constants.UNKNOWN_STAGE_FLAG:\n label = label_unspecific\n else:\n label = label_unstable\n\n if cur_index + 1 < len(stage_range):\n range_for_display = [*each_range, stage_range[cur_index + 1][0]]\n else:\n range_for_display = each_range\n\n image_list = []\n for i in range_for_display:\n image = toolbox.compress_frame(\n i.get_data(), compress_rate=compress_rate, target_size=target_size\n )\n frame = {\n \"frame_id\": i.frame_id,\n \"timestamp\": f\"{i.timestamp:.5f}\",\n \"image\": toolbox.np2b64str(image)\n }\n image_list.append(frame)\n\n first, last = each_range[0], each_range[-1]\n title = (f\"{label} \"\n f\"区间: {first.frame_id}({first.timestamp:.5f}) - {last.frame_id}({last.timestamp:.5f}) \"\n f\"耗时: {last.timestamp - first.timestamp:.5f} \"\n f\"分类: {first.stage}\")\n thumbnail_list.append({title: image_list})\n\n cost_dict = classifier_result.calc_changing_cost()\n timestamp = toolbox.get_timestamp_str()\n\n extra_dict[\"视频路径\"] = classifier_result.video_path\n extra_dict[\"总计帧数\"] = str(classifier_result.get_length())\n extra_dict[\"每帧间隔\"] = str(classifier_result.get_offset())\n\n def get_template() -> str:\n template_dirs = os.path.join(Constants.NEXA, \"template\")\n template_path = 
os.path.join(template_dirs, \"template_extra.html\")\n with open(template_path, encoding=constants.CHARSET) as t:\n template_file = t.read()\n return template_file\n\n if framix_template:\n template = Template(framix_template)\n else:\n template = Template(get_template())\n\n template_content = template.render(\n thumbnail_list=thumbnail_list,\n extras=extra_dict,\n background_color=constants.BACKGROUND_COLOR,\n cost_dict=cost_dict,\n timestamp=timestamp,\n version_code=\"1.0.0\"\n )\n\n default_name = f\"{timestamp}.html\"\n if os.path.isdir(proto_path):\n report_path = os.path.join(proto_path, default_name)\n else:\n report_path = proto_path\n\n with open(report_path, \"w\", encoding=constants.CHARSET) as fh:\n fh.write(template_content)\n logger.info(f\"生成单次报告: {os.path.basename(report_path)}\")\n\n return report_path" }, { "identifier": "Record", "path": "nexaflow/skills/record.py", "snippet": "class Record(object):\n\n def __init__(self):\n self.__connection: Optional[Popen] = None\n self.__record_event: threading.Event = threading.Event()\n self.__initial: str = \"scrcpy\"\n\n def start_record(self, video_path: str, serial: str = None) -> None:\n cmd = [\n self.__initial, \"--no-audio\", \"--video-bit-rate\", \"8M\", \"--max-fps\", \"60\", \"-Nr\",\n f\"{os.path.join(video_path, 'screen')}.mkv\"\n ]\n if serial:\n cmd.insert(1, \"-s\")\n cmd.insert(2, serial)\n self.__connection = Terminal.cmd_connect(cmd)\n\n def stream(flow: Union[int, IO[str]]) -> None:\n for line in iter(flow.readline, \"\"):\n logger.info(\" \".join(line.strip().split()))\n flow.close()\n\n if self.__connection:\n self.__record_event.set()\n threading.Thread(target=stream, args=(self.__connection.stdout, )).start()\n threading.Thread(target=stream, args=(self.__connection.stderr, )).start()\n time.sleep(1)\n\n def stop_record(self) -> None:\n self.__connection.send_signal(signal.CTRL_C_EVENT)\n self.__record_event.clear()\n self.__connection = None\n\n try:\n Terminal.cmd_oneshot([\"taskkill\", \"/im\", \"scrcpy.exe\"])\n except KeyboardInterrupt:\n logger.info(\"Stop with Ctrl_C_Event ...\")" }, { "identifier": "Player", "path": "nexaflow/skills/player.py", "snippet": "class Player(object):\n\n def __init__(self):\n pygame.mixer.init()\n\n @staticmethod\n def load_all_audio(audio_dirs: str) -> List[Tuple[str, str]]:\n audio_list = []\n for audio_file in os.listdir(audio_dirs):\n if \".mp3\" in audio_file or \".wav\" in audio_file:\n if match := re.search(r\".*?(?=\\.)\", audio_file):\n audio_list.append(\n (match.group(), os.path.join(audio_dirs, audio_file))\n )\n return audio_list\n\n @staticmethod\n def load_audio(audio_dirs: str, audio_name: str) -> Tuple[str, str]:\n query, audio = \"\", \"\"\n for audio_file in os.listdir(audio_dirs):\n if audio_name in audio_file:\n if match := re.search(r\".*?(?=\\.)\", audio_file):\n query = match.group()\n audio = os.path.join(audio_dirs, audio_file)\n return query, audio\n\n @staticmethod\n def play_audio(audio_file: str, volume: float = 1.0):\n if os.path.isfile(audio_file):\n pygame.mixer.music.load(audio_file)\n pygame.mixer.music.set_volume(volume)\n pygame.mixer.music.play()\n logger.info(f\"INFO: Playing audio {audio_file}\")\n while pygame.mixer.music.get_busy():\n pygame.time.Clock().tick(10)\n else:\n logger.error(f\"{audio_file} 不是一个音频文件 ...\")" }, { "identifier": "Switch", "path": "nexaflow/skills/switch.py", "snippet": "class Switch(object):\n\n def __init__(self):\n self.__ffmpeg = \"ffmpeg\"\n self.__ffprobe = \"ffprobe\"\n\n async def 
audio_reform(self, src: str, dst: str) -> None:\n \"\"\"\n 调整mp3编码格式为标准mp3\n :param src: 原音频路径\n :param dst: 新音频路径\n \"\"\"\n cmd = [self.__ffmpeg, \"-i\", src, \"-ar\", \"44100\", \"-b:a\", \"128k\", dst]\n await Terminal.cmd_line(*cmd)\n\n async def video_reform(self, src: str, dst: str) -> None:\n \"\"\"\n 转换视频格式\n :param src: 原始视频路径\n :param dst: 新视频路径\n \"\"\"\n cmd = [self.__ffmpeg, \"-i\", src, \"-r\", \"60\", dst]\n await Terminal.cmd_line(*cmd)\n\n async def video_change(self, src: str, dst: str) -> None:\n \"\"\"\n 调整视频\n :param src: 原视频路径\n :param dst: 新视频路径\n \"\"\"\n cmd = [\n self.__ffmpeg, \"-i\", src, \"-vf\", \"fps=60\", \"-c:v\",\n \"libx264\", \"-crf\", \"18\", \"-c:a\", \"copy\", dst\n ]\n await Terminal.cmd_line(*cmd)\n\n async def video_tailor(self, src: str, dst: str, start: str = \"00:00:00\", end: str = \"00:00:05\") -> None:\n \"\"\"\n 截取视频\n :param src: 原视频路径\n :param dst: 新视频路径\n :param start: 开始\n :param end: 结束\n \"\"\"\n before = os.path.basename(src).split(\".\")[0]\n after = os.path.basename(src).split(\".\")[-1]\n target = os.path.join(\n dst,\n f\"{before}_{time.strftime('%Y%m%d%H%M%S')}_{random.randint(100, 999)}.{after}\"\n )\n cmd = [self.__ffmpeg, \"-i\", src, \"-ss\", start, \"-t\", end, \"-c\", \"copy\", target]\n await Terminal.cmd_line(*cmd)\n\n async def video_cutter(self, src: str, dst: str, start: str = \"00:00:00\", end: str = \"00:00:05\") -> None:\n \"\"\"\n 流式截取视频\n :param src: 原视频路径\n :param dst: 新视频路径\n :param start: 开始\n :param end: 结束\n \"\"\"\n before = os.path.basename(src).split(\".\")[0]\n after = os.path.basename(src).split(\".\")[-1]\n target = os.path.join(\n dst,\n f\"{before}_{time.strftime('%Y%m%d%H%M%S')}_{random.randint(100, 999)}.{after}\"\n )\n cmd = [\n self.__ffmpeg, \"-i\", src, \"-ss\", start, \"-t\", end, \"-vf\", \"fps=60\",\n \"-c:v\", \"libx264\", \"-crf\", \"18\", \"-c:a\", \"copy\", target\n ]\n await Terminal.cmd_line(*cmd)\n\n async def video_length(self, src: str) -> float:\n \"\"\"\n 查看视频的时间长度\n :param src: 原视频路径\n :return: 视频时间长度\n \"\"\"\n cmd = [\n self.__ffprobe, \"-v\", \"error\", \"-show_entries\", \"format=duration\",\n \"-of\", \"default=noprint_wrappers=1:nokey=1\", \"-i\", src\n ]\n result = await Terminal.cmd_line(*cmd)\n return float(result.strip())" }, { "identifier": "VideoCutter", "path": "nexaflow/cutter/cutter.py", "snippet": "class VideoCutter(object):\n\n def __init__(\n self,\n step: int = None,\n compress_rate: float = None,\n target_size: typing.Tuple[int, int] = None,\n ):\n\n self.step = step or 1\n\n if (not compress_rate) and (not target_size):\n # logger.debug(\n # f\"no compress rate or target size received. 
set compress rate to 0.2\"\n # )\n compress_rate = 0.2\n\n self._hook_list: typing.List[BaseHook] = list()\n compress_hook = CompressHook(\n overwrite=True, compress_rate=compress_rate, target_size=target_size\n )\n grey_hook = GreyHook(overwrite=True)\n self.add_hook(compress_hook)\n self.add_hook(grey_hook)\n\n def add_hook(self, new_hook: BaseHook):\n self._hook_list.append(new_hook)\n # logger.debug(f\"add hook: {new_hook.__class__.__name__}\")\n\n @staticmethod\n def pic_split(origin: np.ndarray, block: int) -> typing.List[np.ndarray]:\n result: typing.List[np.ndarray] = list()\n for each_block in np.array_split(origin, block, axis=0):\n sub_block = np.array_split(each_block, block, axis=1)\n result += sub_block\n return result\n\n def _apply_hook(self, frame: VideoFrame, *args, **kwargs) -> VideoFrame:\n for each_hook in self._hook_list:\n frame = each_hook.do(frame, *args, **kwargs)\n return frame\n\n @staticmethod\n def compare_frame_list(\n src: typing.List[np.ndarray], target: typing.List[np.ndarray]\n ) -> typing.List[float]:\n\n ssim = 1.0\n mse = 0.0\n psnr = 0.0\n\n for part_index, (each_start, each_end) in enumerate(zip(src, target)):\n part_ssim = toolbox.compare_ssim(each_start, each_end)\n if part_ssim < ssim:\n ssim = part_ssim\n\n part_mse = toolbox.calc_mse(each_start, each_end)\n if part_mse > mse:\n mse = part_mse\n\n part_psnr = toolbox.calc_psnr(each_start, each_end)\n if part_psnr > psnr:\n psnr = part_psnr\n # logger.debug(\n # f\"part {part_index}: ssim={part_ssim}; mse={part_mse}; psnr={part_psnr}\"\n # )\n return [ssim, mse, psnr]\n\n @staticmethod\n def split_into_parts(value: int, parts: int) -> List[Tuple[int, int, int]]:\n division, remainder = value // parts, value % parts\n result, current_start = [], 1\n\n for i in range(parts):\n current_end = current_start + division - 1\n if i == parts - 1: # 处理最后一部分,加上余数\n current_end += remainder\n result.append((current_start, current_end, current_end - current_start))\n\n if i < parts - 1: # 不是最后一部分时,添加断开部分\n gap_start = current_end\n gap_end = current_end + 1\n result.append((gap_start, gap_end, gap_end - gap_start))\n current_start = current_end + 1\n\n return result\n\n def handler_frames(self, window: Window) -> typing.List[VideoCutRange]:\n range_list_part = []\n\n def technique():\n frame_list = window.load_data()\n frame_list = [self._apply_hook(each) for each in frame_list]\n\n ssim_list, mse_list, psnr_list = [], [], []\n\n cur_frame = frame_list[0]\n first_target_frame = frame_list[1]\n cur_frame_list = self.pic_split(cur_frame.data, window.block)\n for each in frame_list[1:]:\n each_frame_list = self.pic_split(each.data, window.block)\n ssim, mse, psnr = self.compare_frame_list(\n cur_frame_list, each_frame_list\n )\n ssim_list.append(ssim)\n mse_list.append(mse)\n psnr_list.append(psnr)\n\n ssim = window.float_merge(ssim_list)\n mse = window.float_merge(mse_list)\n psnr = window.float_merge(psnr_list)\n\n range_list_part.append(\n VideoCutRange(\n window.video,\n start=cur_frame.frame_id, end=first_target_frame.frame_id,\n ssim=[ssim], mse=[mse], psnr=[psnr],\n start_time=cur_frame.timestamp, end_time=first_target_frame.timestamp,\n )\n )\n\n pbar = toolbox.show_progress(window.frame_total, 174, \"Cutter\")\n while True:\n technique()\n pbar.update(1)\n\n continue_flag = window.shift()\n if not continue_flag:\n pbar.close()\n break\n\n return range_list_part\n\n def _convert_video_into_range_list(\n self, video: VideoObject, block: int, window_size: int, window_coefficient: int\n ) -> 
typing.List[VideoCutRange]:\n\n step = self.step\n video_length = video.frame_count\n range_list: typing.List[VideoCutRange] = list()\n logger.info(f\"总帧数: {video_length} 片段数: {video_length - 1} 分辨率: {video.frame_size}\")\n\n window_list: List[\"Window\"] = []\n for index, parts in enumerate(self.split_into_parts(video_length, 2)):\n start, end, size = parts\n logger.info(f\"帧片段: {index + 1:02} Start: {start:03} End: {end:03} Length: {size:03}\")\n window = Window(video, step, block, window_size, window_coefficient, start, end, size)\n window_list.append(window)\n\n with ThreadPoolExecutor() as executor:\n futures = [executor.submit(self.handler_frames, w) for w in window_list]\n for future in futures:\n range_list.extend(future.result())\n\n return range_list\n\n def cut(\n self,\n video: typing.Union[str, VideoObject],\n block: int = None,\n window_size: int = None,\n window_coefficient: int = None,\n *_,\n **kwargs,\n ) -> VideoCutResult:\n\n if not block:\n block = 3\n if not window_size:\n window_size = 1\n if not window_coefficient:\n window_coefficient = 2\n\n start_time = time.time()\n if isinstance(video, str):\n video = VideoObject(video)\n\n logger.info(f\"开始压缩视频: {os.path.basename(video.path)}\")\n range_list = self._convert_video_into_range_list(\n video, block, window_size, window_coefficient\n )\n logger.info(f\"视频压缩完成: {os.path.basename(video.path)}\")\n logger.info(f\"视频压缩耗时: {(time.time() - start_time):.2f}秒\")\n\n return VideoCutResult(video, range_list, cut_kwargs=kwargs)" }, { "identifier": "VideoObject", "path": "nexaflow/video.py", "snippet": "class VideoObject(object):\n\n def __init__(\n self,\n path: typing.Union[str, os.PathLike],\n fps: int = None,\n ):\n \"\"\"\n 初始化,检查文件路径是否有效,执行其他一些初始化操作\n \"\"\"\n assert os.path.isfile(path), f\"video {path} not existed\"\n self.path: str = str(path)\n self.grey_data: typing.Optional[typing.Tuple[\"VideoFrame\"]] = tuple() # 灰度帧\n self.hued_data: typing.Optional[typing.Tuple[\"ColorFrame\"]] = tuple() # 彩色帧\n\n if fps:\n video_path = os.path.join(tempfile.mkdtemp(), f\"tmp_{fps}.mp4\")\n logger.debug(f\"convert video, and bind path to {video_path}\")\n logger.info(f\"转换视频: {video_path}\")\n toolbox.fps_convert(\n fps, self.path, video_path, imageio_ffmpeg.get_ffmpeg_exe()\n )\n self.path = video_path\n\n with toolbox.video_capture(self.path) as cap:\n self.frame_count = toolbox.get_frame_count(cap)\n self.frame_size = toolbox.get_frame_size(cap)\n\n logger.info(f\"视频已生成,视频帧长度: {self.frame_count} 分辨率: {self.frame_size}\")\n\n def __str__(self):\n return f\"<VideoObject path={self.path}>\"\n\n __repr__ = __str__\n\n def sync_timestamp(self, frame_data: tuple[VideoFrame]) -> None:\n assert frame_data, \"load_frames() first\"\n vid = mpy.VideoFileClip(self.path)\n\n vid_count = vid.reader.nframes\n pbar = toolbox.show_progress(vid_count, 153, \"Synzer\")\n for frame_id, (timestamp, _) in enumerate(vid.iter_frames(with_times=True)):\n if frame_id >= len(frame_data):\n break\n # frame_id_real = frame_id + 1\n if not frame_data[frame_id].timestamp:\n # logger.debug(f\"fix frame {frame_id_real}'s timestamp: {timestamp}\")\n frame_data[frame_id].timestamp = timestamp\n pbar.update(1)\n pbar.close()\n\n def sync_backstage(self, frame_data: tuple[ColorFrame]) -> None:\n assert frame_data, \"load_frames() first\"\n vid = mpy.VideoFileClip(self.path)\n\n for frame_id, (timestamp, _) in enumerate(vid.iter_frames(with_times=True)):\n if frame_id >= len(frame_data):\n break\n # frame_id_real = frame_id + 1\n if not 
frame_data[frame_id].timestamp:\n # logger.debug(f\"fix frame {frame_id_real}'s timestamp: {timestamp}\")\n frame_data[frame_id].timestamp = timestamp\n\n def clean_frames(self):\n \"\"\"\n 清除所有帧数据\n \"\"\"\n self.grey_data = tuple()\n self.hued_data = tuple()\n\n @staticmethod\n def frame_details(frame_type):\n each_cost = frame_type[0].data.nbytes / (1024 ** 2)\n total_cost = each_cost * len(frame_type)\n frame_size = frame_type[0].data.shape[::-1]\n return f\"{frame_type[0].__class__.__name__}: [{each_cost:.2f} MB] [{total_cost:.2f} MB] {frame_size}\"\n\n def load_frames(self, color: bool = False):\n \"\"\"\n 从文件中加载所有帧到内存\n \"\"\"\n logger.info(f\"加载视频帧到内存: {os.path.basename(self.path)}\")\n\n def load_stream(frames: type[VideoFrame]):\n pbar = toolbox.show_progress(self.frame_count, 180, \"Loader\")\n data: list[VideoFrame] = []\n with toolbox.video_capture(self.path) as cap:\n for success, frame in iter(lambda: cap.read(), (False, None)):\n if success:\n data.append(frames.initial(cap, frame))\n pbar.update(1)\n pbar.close()\n return data\n\n def back_ground(frames: type[ColorFrame]):\n data: list[ColorFrame] = []\n with toolbox.video_capture(self.path) as cap:\n for success, frame in iter(lambda: cap.read(), (False, None)):\n if success:\n data.append(frames.initial(cap, frame))\n return data\n\n def load_stream_sync(brand):\n self.sync_timestamp(tuple(frame_data := load_stream(brand)))\n return frame_data\n\n def back_ground_sync(brand):\n self.sync_backstage(tuple(frame_data := back_ground(brand)))\n return frame_data\n\n start_time, task, hued = time.time(), None, None\n if color:\n task = ThreadPoolExecutor()\n hued = task.submit(back_ground_sync, ColorFrame)\n\n grey = load_stream_sync(VideoFrame)\n self.grey_data = tuple(grey)\n logger.info(f\"灰度帧已加载: {self.frame_details(self.grey_data)}\")\n logger.info(f\"视频加载耗时: {time.time() - start_time:.2f} 秒\")\n return task, hued\n\n def _read_from_file(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 从文件中读取帧\n \"\"\"\n with toolbox.video_capture(self.path) as cap:\n success, frame = cap.read()\n while success:\n yield VideoFrame.initial(cap, frame)\n success, frame = cap.read()\n\n def _read_from_mem(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 从内存中读取帧\n \"\"\"\n for each_frame in self.grey_data:\n yield each_frame\n\n def _read(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 选择从文件还是从内存中读取帧\n \"\"\"\n if self.grey_data:\n yield from self._read_from_mem()\n else:\n yield from self._read_from_file()\n\n def get_iterator(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 获取帧的迭代器\n \"\"\"\n return self._read()\n\n def get_operator(self) -> _BaseFrameOperator:\n \"\"\"\n 根据是否已经加载帧,返回相应的FrameOperator(`MemFrameOperator`或`FileFrameOperator`)\n \"\"\"\n if self.grey_data:\n return MemFrameOperator(self)\n return FileFrameOperator(self)\n\n def __iter__(self):\n \"\"\"\n 返回一个用于迭代帧的迭代器\n \"\"\"\n return self.get_iterator()" }, { "identifier": "Frame", "path": "nexaflow/video.py", "snippet": "class Frame(object):\n\n def __init__(self, frame_id: int, timestamp: float, data: np.ndarray):\n self.frame_id: int = frame_id\n self.timestamp: float = timestamp\n self.data: np.ndarray = data\n\n @staticmethod\n def initial(cap: cv2.VideoCapture, frame: np.ndarray) -> \"Frame\":\n raise NotImplementedError\n\n def copy(self) -> \"Frame\":\n raise NotImplementedError" }, { "identifier": "KerasClassifier", "path": "nexaflow/classifier/keras_classifier.py", "snippet": "class 
KerasClassifier(BaseModelClassifier):\n\n UNKNOWN_STAGE_NAME = constants.UNKNOWN_STAGE_FLAG\n MODEL_DENSE = 6\n\n def __init__(\n self,\n score_threshold: float = None,\n data_size: typing.Sequence[int] = None,\n nb_train_samples: int = None,\n nb_validation_samples: int = None,\n epochs: int = None,\n batch_size: int = None,\n *_,\n **__,\n ):\n super(KerasClassifier, self).__init__(*_, **__)\n\n # 模型\n self._model: typing.Optional[keras.Sequential] = None\n # 配置\n self.score_threshold: float = score_threshold or 0.0\n self.data_size: typing.Sequence[int] = data_size or (200, 200)\n self.nb_train_samples: int = nb_train_samples or 64\n self.nb_validation_samples: int = nb_validation_samples or 64\n self.epochs: int = epochs or 20\n self.batch_size: int = batch_size or 4\n\n # logger.debug(f\"score threshold: {self.score_threshold}\")\n # logger.debug(f\"data size: {self.data_size}\")\n # logger.debug(f\"nb train samples: {self.nb_train_samples}\")\n # logger.debug(f\"nb validation samples: {self.nb_validation_samples}\")\n # logger.debug(f\"epochs: {self.epochs}\")\n # logger.debug(f\"batch size: {self.batch_size}\")\n\n @property\n def follow_keras_size(self):\n return self.data_size[1], self.data_size[0]\n\n @property\n def follow_cv_size(self):\n return self.data_size[0], self.data_size[1]\n\n def clean_model(self):\n self._model = None\n\n def save_model(self, model_path: str, overwrite: bool = None):\n logger.debug(f\"save model to {model_path}\")\n # assert model file\n if os.path.isfile(model_path) and not overwrite:\n raise FileExistsError(\n f\"model file {model_path} already existed, you can set `overwrite` True to cover it\"\n )\n # assert model data is not empty\n assert self._model, \"model is empty\"\n print(self._model.summary())\n self._model.save_weights(model_path)\n\n def load_model(self, model_path: str, overwrite: bool = None):\n # logger.debug(f\"load model from {model_path}\")\n logger.info(f\"加载Keras神经网络引擎 ...\")\n # assert model file\n assert os.path.isfile(model_path), f\"model file {model_path} not existed\"\n # assert model data is empty\n if self._model and not overwrite:\n raise RuntimeError(\n f\"model is not empty, you can set `overwrite` True to cover it\"\n )\n self._model = self.create_model()\n self._model.load_weights(model_path)\n\n def create_model(self) -> keras.Sequential:\n # logger.info(f\"creating Keras sequential model\")\n logger.info(\"Keras神经网络引擎创建图像分析模型 ...\")\n if keras.backend.image_data_format() == \"channels_first\":\n input_shape = (1, *self.follow_keras_size)\n else:\n input_shape = (*self.follow_keras_size, 1)\n\n model = keras.Sequential()\n\n model.add(keras.layers.Conv2D(32, (3, 3), padding=\"same\", input_shape=input_shape))\n model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(keras.layers.Dropout(0.25))\n\n model.add(keras.layers.Conv2D(64, (3, 3), padding=\"same\"))\n model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(keras.layers.Dropout(0.25))\n\n model.add(keras.layers.Conv2D(128, (3, 3), padding=\"same\"))\n model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(keras.layers.Dropout(0.25))\n\n model.add(keras.layers.Flatten())\n model.add(keras.layers.Dense(256, activation=\"relu\"))\n model.add(keras.layers.Dropout(0.5))\n model.add(keras.layers.Dense(self.MODEL_DENSE, activation=\"softmax\"))\n\n model.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n\n # logger.info(\"Keras model created\")\n logger.info(\"Keras神经网络引擎加载完成,开始分析图像 
...\")\n return model\n\n def train(self, data_path: str = None, *_, **__):\n\n def _data_verify(p: str):\n p = pathlib.Path(p)\n assert p.is_dir(), f\"{p} is not a valid directory\"\n\n number_of_dir = len([each for each in os.listdir(p) if (p / each).is_dir()])\n assert (\n number_of_dir > 1\n ), f\"dataset only contains one class. maybe some path errors happened: {p}?\"\n\n assert number_of_dir <= self.MODEL_DENSE, (\n f\"dataset has {number_of_dir} classes (more than \" + str(self.MODEL_DENSE) + \")\"\n )\n\n _data_verify(data_path)\n\n if not self._model:\n self._model = self.create_model()\n\n datagen = keras.preprocessing.image.ImageDataGenerator(\n rescale=1.0 / 16,\n shear_range=0.2,\n zoom_range=0.2,\n validation_split=0.33,\n horizontal_flip=True # 水平翻转增强\n )\n\n train_generator = datagen.flow_from_directory(\n data_path,\n target_size=self.follow_keras_size,\n batch_size=self.batch_size,\n color_mode=\"grayscale\",\n class_mode=\"sparse\",\n subset=\"training\",\n )\n\n validation_generator = datagen.flow_from_directory(\n data_path,\n target_size=self.follow_keras_size,\n batch_size=self.batch_size,\n color_mode=\"grayscale\",\n class_mode=\"sparse\",\n subset=\"validation\",\n )\n\n self._model.fit(\n train_generator,\n epochs=self.epochs,\n validation_data=validation_generator,\n )\n\n logger.debug(\"train finished\")\n\n def predict(self, pic_path: str, *args, **kwargs) -> str:\n pic_object = toolbox.imread(pic_path)\n # fake VideoFrame for apply_hook\n fake_frame = VideoFrame(0, 0.0, pic_object)\n fake_frame = self._apply_hook(fake_frame, *args, **kwargs)\n return self.predict_with_object(fake_frame.data)\n\n def predict_with_object(self, frame: np.ndarray) -> str:\n # resize for model\n frame = cv2.resize(frame, dsize=self.follow_cv_size)\n frame = np.expand_dims(frame, axis=[0, -1])\n # verbose = 0, 静默Keras分类显示\n result = self._model.predict(frame, verbose=0)\n tag = str(np.argmax(result, axis=1)[0])\n confidence = result.max()\n # logger.debug(f\"confidence: {confidence}\")\n if confidence < self.score_threshold:\n logger.warning(\n f\"max score is lower than {self.score_threshold}, unknown class\"\n )\n return self.UNKNOWN_STAGE_NAME\n return tag\n\n def _classify_frame(self, frame: VideoFrame, *_, **__) -> str:\n return self.predict_with_object(frame.data)" }, { "identifier": "BaseHook", "path": "nexaflow/hook.py", "snippet": "class BaseHook(object):\n\n def __init__(self, *_, **__):\n # logger.debug(f\"start initialing: {self.__class__.__name__} ...\")\n logger.info(f\"加载视频帧处理单元: Frame Processor {self.__class__.__name__} ...\")\n self.result = dict()\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n # info = f\"execute hook: {self.__class__.__name__}\"\n\n frame_id = frame.frame_id\n if frame_id != -1:\n # logger.debug(f\"{info}, frame id: {frame_id}\")\n pass\n return frame" }, { "identifier": "CropHook", "path": "nexaflow/hook.py", "snippet": "class CropHook(_AreaBaseHook):\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n\n height_range, width_range = self.convert_size_and_offset(*frame.data.shape)\n frame.data[: height_range[0], :] = 0\n frame.data[height_range[1]:, :] = 0\n frame.data[:, : width_range[0]] = 0\n frame.data[:, width_range[1]:] = 0\n return frame" }, { "identifier": "OmitHook", "path": "nexaflow/hook.py", "snippet": "class OmitHook(_AreaBaseHook):\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n\n 
height_range, width_range = self.convert_size_and_offset(*frame.data.shape)\n frame.data[\n height_range[0]: height_range[1], width_range[0]: width_range[1]\n ] = 0\n return frame" }, { "identifier": "FrameSaveHook", "path": "nexaflow/hook.py", "snippet": "class FrameSaveHook(BaseHook):\n\n def __init__(self, target_dir: str, *_, **__):\n super().__init__(*_, **__)\n\n self.target_dir = target_dir\n os.makedirs(target_dir, exist_ok=True)\n # logger.debug(f\"target dir: {target_dir}\")\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n\n safe_timestamp = str(frame.timestamp).replace(\".\", \"_\")\n frame_name = f\"{frame.frame_id}({safe_timestamp}).png\"\n target_path = os.path.join(self.target_dir, frame_name)\n\n # 不能保存中文路径\n # cv2.imwrite(target_path, frame.data)\n # logger.debug(f\"frame saved to {target_path}\")\n\n # 保存中文路径\n cv2.imencode(\".png\", frame.data)[1].tofile(target_path)\n\n return frame" }, { "identifier": "ClassifierResult", "path": "nexaflow/classifier/base.py", "snippet": "class ClassifierResult(object):\n\n LABEL_DATA: str = \"data\"\n LABEL_VIDEO_PATH: str = \"video_path\"\n\n def __init__(self, data: typing.List[SingleClassifierResult]):\n self.video_path: str = data[0].video_path\n self.data: typing.List[SingleClassifierResult] = data\n\n def get_timestamp_list(self) -> typing.List[float]:\n return [each.timestamp for each in self.data]\n\n def get_stage_list(self) -> typing.List[str]:\n return [each.stage for each in self.data]\n\n def get_length(self) -> int:\n return len(self.data)\n\n def get_offset(self) -> float:\n return self.data[1].timestamp - self.data[0].timestamp\n\n def get_ordered_stage_set(self) -> typing.List[str]:\n ret = list()\n for each in self.get_stage_list():\n if not ret:\n ret.append(each)\n continue\n if each == ret[-1]:\n continue\n ret.append(each)\n return ret\n\n def get_stage_set(self) -> typing.Set[str]:\n return set(self.get_stage_list())\n\n def to_dict(\n self,\n ) -> typing.Dict[str, typing.List[typing.List[SingleClassifierResult]]]:\n stage_list = list(self.get_stage_set())\n try:\n int(stage_list[0])\n except ValueError:\n stage_list.sort()\n else:\n stage_list.sort(key=lambda o: int(o))\n\n d = OrderedDict()\n for each_stage in stage_list:\n d[each_stage] = self.get_specific_stage_range(each_stage)\n return d\n\n def contain(self, stage_name: str) -> bool:\n return stage_name in self.get_stage_set()\n\n def first(self, stage_name: str) -> SingleClassifierResult:\n for each in self.data:\n if each.stage == stage_name:\n # logger.debug(f\"first frame of {stage_name}: {each}\")\n return each\n logger.warning(f\"no stage named {stage_name} found\")\n\n def last(self, stage_name: str) -> SingleClassifierResult:\n for each in self.data[::-1]:\n if each.stage == stage_name:\n # logger.debug(f\"last frame of {stage_name}: {each}\")\n return each\n logger.warning(f\"no stage named {stage_name} found\")\n\n def get_stage_range(self) -> typing.List[typing.List[SingleClassifierResult]]:\n result: typing.List[typing.List[SingleClassifierResult]] = []\n\n cur = self.data[0]\n cur_index = cur.frame_id - 1\n ptr = cur_index\n length = self.get_length()\n while ptr < length:\n next_one = self.data[ptr]\n if cur.stage == next_one.stage:\n ptr += 1\n continue\n\n result.append(self.data[cur_index: ptr + 1 - 1] or [self.data[cur_index]])\n cur = next_one\n cur_index = next_one.frame_id - 1\n\n assert len(result) > 0, \"video seems to only contain one stage\"\n\n last_data = 
self.data[-1]\n last_result = result[-1][-1]\n if last_result != last_data:\n result.append(\n self.data[last_result.frame_id - 1 + 1: last_data.frame_id - 1 + 1]\n or [self.data[last_result.frame_id - 1]]\n )\n # logger.debug(f\"get stage range: {result}\")\n return result\n\n def get_specific_stage_range(\n self, stage_name: str\n ) -> typing.List[typing.List[SingleClassifierResult]]:\n ret = list()\n for each_range in self.get_stage_range():\n cur = each_range[0]\n if cur.stage == stage_name:\n ret.append(each_range)\n return ret\n\n def get_not_stable_stage_range(\n self,\n ) -> typing.List[typing.List[SingleClassifierResult]]:\n unstable = self.get_specific_stage_range(constants.UNSTABLE_FLAG)\n ignore = self.get_specific_stage_range(constants.IGNORE_FLAG)\n return sorted(unstable + ignore, key=lambda x: x[0].stage)\n\n def mark_range(self, start: int, end: int, target_stage: str):\n for each in self.data[start:end]:\n each.stage = target_stage\n # logger.debug(f\"range {start} to {end} has been marked as {target_stage}\")\n\n def mark_range_unstable(self, start: int, end: int):\n self.mark_range(start, end, constants.UNSTABLE_FLAG)\n\n def mark_range_ignore(self, start: int, end: int):\n self.mark_range(start, end, constants.IGNORE_FLAG)\n\n def time_cost_between(self, start_stage: str, end_stage: str) -> float:\n return self.first(end_stage).timestamp - self.last(start_stage).timestamp\n\n def get_important_frame_list(self) -> typing.List[SingleClassifierResult]:\n result = [self.data[0]]\n\n prev = self.data[0]\n for cur in self.data[1:]:\n if cur.stage != prev.stage:\n result.append(prev)\n result.append(cur)\n prev = cur\n\n if result[-1] != self.data[-1]:\n result.append(self.data[-1])\n return result\n\n def calc_changing_cost(\n self,\n ) -> typing.Dict[str, typing.Tuple[SingleClassifierResult, SingleClassifierResult]]:\n\n cost_dict: typing.Dict[\n str, typing.Tuple[SingleClassifierResult, SingleClassifierResult]\n ] = {}\n i = 0\n while i < len(self.data) - 1:\n cur = self.data[i]\n next_one = self.data[i + 1]\n\n if not next_one.is_stable():\n for j in range(i + 1, len(self.data)):\n i = j\n next_one = self.data[j]\n if next_one.is_stable():\n break\n\n changing_name = f\"from {cur.stage} to {next_one.stage}\"\n cost_dict[changing_name] = (cur, next_one)\n else:\n i += 1\n return cost_dict\n\n def dumps(self) -> str:\n\n def _handler(obj: object):\n if isinstance(obj, np.ndarray):\n return \"<np.ndarray object>\"\n return obj.__dict__\n\n return json.dumps(self, sort_keys=True, default=_handler)\n\n def dump(self, json_path: str, **kwargs):\n logger.debug(f\"dump result to {json_path}\")\n assert not os.path.isfile(json_path), f\"{json_path} already existed\"\n with open(json_path, \"w+\", **kwargs) as f:\n f.write(self.dumps())\n\n @classmethod\n def load(cls, from_file: str) -> \"ClassifierResult\":\n assert os.path.isfile(from_file), f\"file {from_file} not existed\"\n with open(from_file, encoding=constants.CHARSET) as f:\n content = json.load(f)\n\n data = content[cls.LABEL_DATA]\n return ClassifierResult([SingleClassifierResult(**each) for each in data])\n\n def diff(self, another: \"ClassifierResult\") -> DiffResult:\n return DiffResult(self, another)\n\n def is_order_correct(self, should_be: typing.List[str]) -> bool:\n cur = self.get_ordered_stage_set()\n len_cur, len_should_be = len(cur), len(should_be)\n if len_cur == len_should_be:\n return cur == should_be\n if len_cur < len_should_be:\n return False\n\n ptr_should, ptr_cur = 0, 0\n while ptr_cur < len_cur:\n 
if cur[ptr_cur] == should_be[ptr_should]:\n ptr_should += 1\n ptr_cur += 1\n if ptr_should == len_should_be:\n return True\n return False\n\n get_frame_length = get_offset" }, { "identifier": "SingleClassifierResult", "path": "nexaflow/classifier/base.py", "snippet": "class SingleClassifierResult(object):\n\n def __init__(\n self,\n video_path: str,\n frame_id: int,\n timestamp: float,\n stage: str,\n data: np.ndarray = None,\n ):\n self.video_path: str = video_path\n self.frame_id: int = frame_id\n self.timestamp: float = timestamp\n self.stage: str = stage\n self.data: np.ndarray = data\n\n def to_video_frame(self, *args, **kwargs) -> VideoFrame:\n if self.data is not None:\n return VideoFrame(self.frame_id, self.timestamp, self.data)\n\n with toolbox.video_capture(self.video_path) as cap:\n frame = toolbox.get_frame(cap, self.frame_id)\n compressed = toolbox.compress_frame(frame, *args, **kwargs)\n return VideoFrame(self.frame_id, self.timestamp, compressed)\n\n def get_data(self) -> np.ndarray:\n return self.to_video_frame().data\n\n def is_stable(self) -> bool:\n return self.stage not in (\n constants.UNSTABLE_FLAG,\n constants.IGNORE_FLAG,\n constants.UNKNOWN_STAGE_FLAG,\n )\n\n def contain_image(\n self, *, image_path: str = None, image_object: np.ndarray = None, **kwargs\n ) -> typing.Dict[str, typing.Any]:\n return self.to_video_frame().contain_image(\n image_path=image_path, image_object=image_object, **kwargs\n )\n\n def to_dict(self) -> typing.Dict:\n return self.__dict__\n\n def __str__(self):\n return f\"<ClassifierResult stage={self.stage} frame_id={self.frame_id} timestamp={self.timestamp}>\"\n\n __repr__ = __str__" } ]
import os
import cv2
import time
import random
import asyncio
from loguru import logger
from typing import List, Union, Optional
from concurrent.futures import ThreadPoolExecutor
from nexaflow import toolbox
from nexaflow.skills.report import Report
from nexaflow.skills.record import Record
from nexaflow.skills.player import Player
from nexaflow.skills.switch import Switch
from nexaflow.cutter.cutter import VideoCutter
from nexaflow.video import VideoObject, Frame
from nexaflow.classifier.keras_classifier import KerasClassifier
from nexaflow.hook import BaseHook, CropHook, OmitHook, FrameSaveHook
from nexaflow.classifier.base import ClassifierResult, SingleClassifierResult
18,526
self.threshold = kwargs.get("threshold", 0.97) self.offset = kwargs.get("threshold", 3) self.compress_rate = kwargs.get("compress_rate", 0.5) self.window_size = kwargs.get("window_size", 1) self.window_coefficient = kwargs.get("window_coefficient", 2) def validate(): screen_tag, screen_cap = None, None if os.path.isfile(self.report.video_path): screen = cv2.VideoCapture(self.report.video_path) if screen.isOpened(): screen_tag = os.path.basename(self.report.video_path) screen_cap = self.report.video_path screen.release() elif os.path.isdir(self.report.video_path): if len( file_list := [ file for file in os.listdir(self.report.video_path) if os.path.isfile( os.path.join(self.report.video_path, file) ) ] ) > 1 or len(file_list) == 1: screen = cv2.VideoCapture(os.path.join(self.report.video_path, file_list[0])) if screen.isOpened(): screen_tag = os.path.basename(file_list[0]) screen_cap = os.path.join(self.report.video_path, file_list[0]) screen.release() return screen_tag, screen_cap def frame_flip(): if focus: change_record = os.path.join( os.path.dirname(screen_record), f"screen_fps60_{random.randint(100, 999)}.mp4" ) asyncio.run(self.ffmpeg.video_change(screen_record, change_record)) logger.info(f"视频转换完成: {os.path.basename(change_record)}") os.remove(screen_record) logger.info(f"移除旧的视频: {os.path.basename(screen_record)}") else: change_record = screen_record video = VideoObject(change_record) task, hued = video.load_frames(color) return video, task, hued def frame_flow(): video, task, hued = frame_flip() classify = self.framix.pixel_wizard(video) important_frames: List["SingleClassifierResult"] = classify.get_important_frame_list() pbar = toolbox.show_progress(classify.get_length(), 50, "Faster") frames_list = [] if boost: frames_list.append(previous := important_frames[0]) pbar.update(1) for current in important_frames[1:]: frames_list.append(current) pbar.update(1) frames_diff = current.frame_id - previous.frame_id if not previous.is_stable() and not current.is_stable() and frames_diff > 1: for specially in classify.data[previous.frame_id: current.frame_id - 1]: frames_list.append(specially) pbar.update(1) previous = current pbar.close() else: for current in classify.data: frames_list.append(current) pbar.update(1) pbar.close() if color: video.hued_data = tuple(hued.result()) logger.info(f"彩色帧已加载: {video.frame_details(video.hued_data)}") task.shutdown() frames = [video.hued_data[frame.frame_id - 1] for frame in frames_list] else: frames = [frame for frame in frames_list] return classify, frames def frame_flick(classify): try: start_frame = classify.get_not_stable_stage_range()[0][1] end_frame = classify.get_not_stable_stage_range()[-1][-1] except AssertionError: start_frame = classify.get_important_frame_list()[0] end_frame = classify.get_important_frame_list()[-1] if start_frame == end_frame: start_frame = classify.data[0] end_frame = classify.data[-1] time_cost = end_frame.timestamp - start_frame.timestamp before, after, final = f"{start_frame.timestamp:.5f}", f"{end_frame.timestamp:.5f}", f"{time_cost:.5f}" logger.info(f"图像分类结果: [开始帧: {before}] [结束帧: {after}] [总耗时: {final}]") original_inform = self.report.draw( classifier_result=classify, proto_path=self.report.proto_path, target_size=Alynex.target_size ) result = { "total_path": self.report.total_path, "title": self.report.title, "query_path": self.report.query_path, "query": self.report.query, "stage": { "start": start_frame.frame_id, "end": end_frame.frame_id, "cost": f"{time_cost:.5f}" }, "frame": self.report.frame_path, "extra": 
self.report.extra_path, "proto": original_inform } logger.debug(f"Restore: {result}") self.report.load(result) return before, after, final
class Alynex(object): target_size: tuple = (350, 700) fps: int = 60 step: int = 1 block: int = 6 threshold: Union[int | float] = 0.97 offset: int = 3 compress_rate: float = 0.5 window_size: int = 1 window_coefficient: int = 2 kc: KerasClassifier = KerasClassifier( target_size=target_size, data_size=target_size ) def __init__(self): self.__report: Optional[Report] = None self.__record: Optional[Record] = Record() self.__player: Optional[Player] = Player() self.__ffmpeg: Optional[Switch] = Switch() self.__filmer: Optional[Alynex._Filmer] = Alynex._Filmer() self.__framix: Optional[Alynex._Framix] = None def __str__(self): return (f""" <Alynex for NexaFlow Target Size: {self.target_size} Fps: {self.fps} Step: {self.step} Block: {self.block} Threshold: {self.threshold} Offset: {self.offset} Compress Rate: {self.compress_rate} Window Size: {self.window_size} Window Coefficient: {self.window_coefficient} > """) __repr__ = __str__ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass @property def report(self) -> "Report": assert self.__report, f"{self.activate.__name__} first ..." return self.__report @property def record(self) -> "Record": return self.__record @property def player(self) -> "Player": return self.__player @property def ffmpeg(self) -> "Switch": return self.__ffmpeg @property def filmer(self) -> "Alynex._Filmer": return self.__filmer @property def framix(self) -> "Alynex._Framix": assert self.__framix, f"{self.activate.__name__} first ..." return self.__framix @staticmethod def only_video(folder: str) -> List: class Entry(object): def __init__(self, title: str, place: str, sheet: list): self.title = title self.place = place self.sheet = sheet return [ Entry( os.path.basename(root), root, [os.path.join(root, f) for f in sorted(file)] ) for root, _, file in os.walk(folder) if file ] def activate(self, models: str, total_path: str): if not self.__report: self.__report = Report(total_path) self.__framix = Alynex._Framix(self.report) Alynex.kc.load_model(models) class _Filmer(object): @staticmethod def train_model(video_file: str) -> None: model_path = os.path.join( os.path.dirname(video_file), f"Model_{time.strftime('%Y%m%d%H%M%S')}_{os.getpid()}" ) if not os.path.exists(model_path): os.makedirs(model_path, exist_ok=True) # 将视频切分成帧 video = VideoObject(video_file, fps=Alynex.fps) # 新建帧,计算视频总共有多少帧,每帧多少ms video.load_frames() # 压缩视频 cutter = VideoCutter( target_size=Alynex.target_size ) # 计算每一帧视频的每一个block的ssim和峰值信噪比 res = cutter.cut( video=video, block=Alynex.block, window_size=Alynex.window_size, window_coefficient=Alynex.window_coefficient ) # 计算出判断A帧到B帧之间是稳定还是不稳定 stable, unstable = res.get_range( threshold=Alynex.threshold, offset=Alynex.offset ) # 保存分类后的图片 res.pick_and_save( range_list=stable, frame_count=20, to_dir=model_path, meaningful_name=True ) @staticmethod def build_model(src: str) -> None: new_model_path = os.path.join(src, f"Create_Model_{time.strftime('%Y%m%d%H%M%S')}") new_model_name = f"Keras_Model_{random.randint(10000, 99999)}.h5" final_model = os.path.join(new_model_path, new_model_name) if not os.path.exists(new_model_path): os.makedirs(new_model_path, exist_ok=True) Alynex.kc.train(src) Alynex.kc.save_model(final_model, overwrite=True) class _Framix(object): def __init__(self, report: "Report"): self.__framix_list: List["BaseHook"] = [] self.__reporter = report @property def framix_list(self) -> List["BaseHook"]: return self.__framix_list def crop_hook( self, x: Union[int | float], y: Union[int | float], x_size: Union[int | float], 
y_size: Union[int | float] ) -> None: """获取区域""" hook = CropHook((y_size, x_size), (y, x)) self.framix_list.append(hook) def omit_hook( self, x: Union[int | float], y: Union[int | float], x_size: Union[int | float], y_size: Union[int | float] ) -> None: """忽略区域""" hook = OmitHook((y_size, x_size), (y, x)) self.framix_list.append(hook) def pixel_wizard(self, video: "VideoObject") -> "ClassifierResult": cutter = VideoCutter( target_size=Alynex.target_size ) # 应用视频帧处理单元 for mix in self.framix_list: cutter.add_hook(mix) save_hook = FrameSaveHook(self.__reporter.extra_path) cutter.add_hook(save_hook) # 计算每一帧视频的每一个block的ssim和峰值信噪比 res = cutter.cut( video=video, block=Alynex.block, window_size=Alynex.window_size, window_coefficient=Alynex.window_coefficient ) # 计算出判断A帧到B帧之间是稳定还是不稳定 stable, unstable = res.get_range( threshold=Alynex.threshold, offset=Alynex.offset ) # 保存十二张hook图 files = os.listdir(self.__reporter.extra_path) files.sort(key=lambda x: int(x.split("(")[0])) total_images = len(files) interval = total_images // 11 if total_images > 12 else 1 for index, file in enumerate(files): if index % interval != 0: os.remove( os.path.join(self.__reporter.extra_path, file) ) # 为图片绘制线条 draws = os.listdir(self.__reporter.extra_path) for draw in draws: toolbox.draw_line( os.path.join(self.__reporter.extra_path, draw) ) # 开始图像分类 classify = Alynex.kc.classify(video=video, valid_range=stable, keep_data=True) return classify class _Review(object): def __init__(self, *args: str): self.start, self.end, self.cost, *_ = args def __str__(self): return f"<Review Start: {self.start} End: {self.end} Cost: {self.cost}>" __repr__ = __str__ def analyzer( self, boost: bool = True, color: bool = True, focus: bool = False, **kwargs ) -> Optional["Alynex._Review"]: """ 智能分类帧数据 :param boost: 跳帧模式 :param color: 彩色模式 :param focus: 转换视频 :param kwargs: 视频分析配置 :return: 分析结果 """ self.step = kwargs.get("step", 1) self.block = kwargs.get("block", 6) self.threshold = kwargs.get("threshold", 0.97) self.offset = kwargs.get("threshold", 3) self.compress_rate = kwargs.get("compress_rate", 0.5) self.window_size = kwargs.get("window_size", 1) self.window_coefficient = kwargs.get("window_coefficient", 2) def validate(): screen_tag, screen_cap = None, None if os.path.isfile(self.report.video_path): screen = cv2.VideoCapture(self.report.video_path) if screen.isOpened(): screen_tag = os.path.basename(self.report.video_path) screen_cap = self.report.video_path screen.release() elif os.path.isdir(self.report.video_path): if len( file_list := [ file for file in os.listdir(self.report.video_path) if os.path.isfile( os.path.join(self.report.video_path, file) ) ] ) > 1 or len(file_list) == 1: screen = cv2.VideoCapture(os.path.join(self.report.video_path, file_list[0])) if screen.isOpened(): screen_tag = os.path.basename(file_list[0]) screen_cap = os.path.join(self.report.video_path, file_list[0]) screen.release() return screen_tag, screen_cap def frame_flip(): if focus: change_record = os.path.join( os.path.dirname(screen_record), f"screen_fps60_{random.randint(100, 999)}.mp4" ) asyncio.run(self.ffmpeg.video_change(screen_record, change_record)) logger.info(f"视频转换完成: {os.path.basename(change_record)}") os.remove(screen_record) logger.info(f"移除旧的视频: {os.path.basename(screen_record)}") else: change_record = screen_record video = VideoObject(change_record) task, hued = video.load_frames(color) return video, task, hued def frame_flow(): video, task, hued = frame_flip() classify = self.framix.pixel_wizard(video) important_frames: 
List["SingleClassifierResult"] = classify.get_important_frame_list() pbar = toolbox.show_progress(classify.get_length(), 50, "Faster") frames_list = [] if boost: frames_list.append(previous := important_frames[0]) pbar.update(1) for current in important_frames[1:]: frames_list.append(current) pbar.update(1) frames_diff = current.frame_id - previous.frame_id if not previous.is_stable() and not current.is_stable() and frames_diff > 1: for specially in classify.data[previous.frame_id: current.frame_id - 1]: frames_list.append(specially) pbar.update(1) previous = current pbar.close() else: for current in classify.data: frames_list.append(current) pbar.update(1) pbar.close() if color: video.hued_data = tuple(hued.result()) logger.info(f"彩色帧已加载: {video.frame_details(video.hued_data)}") task.shutdown() frames = [video.hued_data[frame.frame_id - 1] for frame in frames_list] else: frames = [frame for frame in frames_list] return classify, frames def frame_flick(classify): try: start_frame = classify.get_not_stable_stage_range()[0][1] end_frame = classify.get_not_stable_stage_range()[-1][-1] except AssertionError: start_frame = classify.get_important_frame_list()[0] end_frame = classify.get_important_frame_list()[-1] if start_frame == end_frame: start_frame = classify.data[0] end_frame = classify.data[-1] time_cost = end_frame.timestamp - start_frame.timestamp before, after, final = f"{start_frame.timestamp:.5f}", f"{end_frame.timestamp:.5f}", f"{time_cost:.5f}" logger.info(f"图像分类结果: [开始帧: {before}] [结束帧: {after}] [总耗时: {final}]") original_inform = self.report.draw( classifier_result=classify, proto_path=self.report.proto_path, target_size=Alynex.target_size ) result = { "total_path": self.report.total_path, "title": self.report.title, "query_path": self.report.query_path, "query": self.report.query, "stage": { "start": start_frame.frame_id, "end": end_frame.frame_id, "cost": f"{time_cost:.5f}" }, "frame": self.report.frame_path, "extra": self.report.extra_path, "proto": original_inform } logger.debug(f"Restore: {result}") self.report.load(result) return before, after, final
def frame_forge(frame: Union[SingleClassifierResult | Frame]):
14
2023-11-13 05:27:34+00:00
24k
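The record above pairs a list of retrieved snippets (each carrying an identifier, a path, and a snippet body) with an import block, a truncated source file, and the single source line that follows it, plus what appear to be bookkeeping values (a token count, an index, a timestamp, and a size bucket). The sketch below is only a rough illustration of how such a row might be turned into a (prompt, target) pair for next-line completion; the top-level key names, the records.jsonl filename, and the prompt layout are assumptions rather than anything taken from this dump.

import json
from typing import Tuple

def build_example(record: dict) -> Tuple[str, str]:
    """Turn one raw row into a (prompt, target) pair.

    The top-level key names used here ("context", "import_statement",
    "cropped_code", "next_line") are assumptions about the dump's schema;
    only the per-snippet keys ("identifier", "path", "snippet") appear in
    the rows themselves.
    """
    # Retrieved snippets: prefix each one with its source path as a comment.
    retrieved = "\n\n".join(
        f"# {item['path']}\n{item['snippet']}"
        for item in record.get("context", [])
    )
    # Prompt = retrieved snippets + import block + truncated source file.
    parts = [retrieved, record["import_statement"], record["cropped_code"]]
    prompt = "\n\n".join(p for p in parts if p)
    # Target = the single source line expected to come next.
    return prompt, record["next_line"].strip()

if __name__ == "__main__":
    # Assumed usage: one JSON object per line in a local dump file.
    with open("records.jsonl", encoding="utf-8") as fh:
        for raw in fh:
            prompt, target = build_example(json.loads(raw))
            print(f"{len(prompt)} prompt chars -> target: {target!r}")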
deepseek-ai/DreamCraft3D
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n # 4D Gaussian Annealing\n anneal_density_blob_std_config: Optional[dict] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original 
scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )\n\n # FIXME: use progressive normal eps\n def update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ) -> None:\n if self.cfg.anneal_density_blob_std_config is not None:\n min_step = self.cfg.anneal_density_blob_std_config.min_anneal_step\n max_step = self.cfg.anneal_density_blob_std_config.max_anneal_step\n if global_step >= min_step and global_step <= max_step:\n end_val = self.cfg.anneal_density_blob_std_config.end_val\n start_val = self.cfg.anneal_density_blob_std_config.start_val\n self.density_blob_std = start_val + (global_step - min_step) * (\n end_val - start_val\n ) / (max_step - min_step)\n\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": 
"threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * 
-1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = 
int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom 
/ torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def 
get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n elif config.otype == \"HashGridSpatialTime\":\n encoding = TCNNEncodingSpatialTime(n_input_dims, config) # 4D-fy encoding\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
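Several snippets in the context list above (ImplicitSDF, ImplicitVolume) estimate surface normals by finite differences of the field; the "finite_difference_laplacian" branch samples the field at +/- eps along each axis and forms central differences. A compact restatement of that stencil against an analytic sphere SDF, so it runs without the surrounding networks (the helper names here are illustrative, not threestudio API):

import torch
import torch.nn.functional as F

def sphere_sdf(p):
    # Analytic SDF of the unit sphere; stands in for forward_sdf / forward_density.
    return p.norm(dim=-1, keepdim=True) - 1.0

def finite_difference_normal(sdf_fn, points, eps=1e-3):
    # Same 6-point stencil as the "finite_difference_laplacian" branch:
    # sample at +/- eps along each axis, then take central differences.
    offsets = torch.tensor([
        [ eps, 0.0, 0.0], [-eps, 0.0, 0.0],
        [0.0,  eps, 0.0], [0.0, -eps, 0.0],
        [0.0, 0.0,  eps], [0.0, 0.0, -eps],
    ], dtype=points.dtype)
    samples = sdf_fn(points[..., None, :] + offsets)            # (..., 6, 1)
    grad = 0.5 * (samples[..., 0::2, 0] - samples[..., 1::2, 0]) / eps
    return F.normalize(grad, dim=-1)

pts = torch.tensor([[1.5, 0.0, 0.0], [0.3, -0.4, 1.2], [-0.7, 0.7, 0.1]])
normals = finite_difference_normal(sphere_sdf, pts)
print((normals - F.normalize(pts, dim=-1)).abs().max())         # ~0 for a sphere

For a sphere the analytic gradient is p / |p|, so the printed error should be near machine precision; the snippets above apply the same stencil to a learned SDF or density instead.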
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
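The import list above ends with `from pysdf import SDF`, which the mesh branch of `initialize_shape` uses to query ground-truth distances; per the comment in that snippet, pysdf reports positive distance inside the shape, so the value is negated. A self-contained illustration of that sign flip (the icosphere stand-in and the final consistency check are assumptions for the example, not repository code):

import numpy as np
import trimesh
from pysdf import SDF

# Unit icosphere as a stand-in for a mesh loaded via cfg.shape_init = "mesh:<path>".
mesh = trimesh.creation.icosphere(subdivisions=3, radius=1.0)

sdf = SDF(mesh.vertices, mesh.faces)      # pysdf: positive inside, negative outside
points = np.random.uniform(-1.5, 1.5, size=(4096, 3))

# Negate so the convention matches the grid's "negative inside" SDF,
# mirroring the sign flip in the mesh branch of initialize_shape.
signed_distance = -sdf(points)

inside = signed_distance < 0
# For a (near-)unit sphere, the inside set should roughly match |p| < 1.
print((inside == (np.linalg.norm(points, axis=-1) < 1.0)).mean())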
15892
"+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not 
other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
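The file above closes with `create_from`, which rebuilds a `TetrahedraSDFGrid` from another geometry by copying buffers and, when `copy_net` is set, the sub-network weights. A toy analogue of that weight-copy pattern, reduced to two `nn.Linear` layers so it runs stand-alone (the class and layer names here are invented for the sketch):

import torch
import torch.nn as nn

class TinyGeometry(nn.Module):
    # Toy analogue of a geometry module with an encoding and a feature head.
    def __init__(self):
        super().__init__()
        self.encoding = nn.Linear(3, 16)
        self.feature_network = nn.Linear(16, 3)

    @staticmethod
    @torch.no_grad()
    def create_from(other, copy_net=True):
        instance = TinyGeometry()
        if copy_net:
            # Same move as TetrahedraSDFGrid.create_from: reuse the trained
            # sub-network weights instead of re-initializing the new instance.
            instance.encoding.load_state_dict(other.encoding.state_dict())
            instance.feature_network.load_state_dict(other.feature_network.state_dict())
        return instance

src = TinyGeometry()
dst = TinyGeometry.create_from(src)
assert torch.equal(src.encoding.weight, dst.encoding.weight)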
elif isinstance(other, ImplicitVolume):
4
2023-10-23 07:40:20+00:00
24k
microsoft/SoM
demo_gpt4v_som.py
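The context snippets that follow (inference_seem_pano, inference_seem_interactive, inference_semsam_m2m_auto, inference_sam_m2m_auto, ...) all share one post-processing step: sort the predicted masks by area in descending order, then paint incrementing integer labels into a uint8 mask map so that later (smaller) masks overwrite the larger ones beneath them. A minimal sketch of just that step (the 2-D label map and toy masks are illustrative simplifications of the adapters' code):

import numpy as np

def build_label_map(masks, hw):
    # Mirrors the labeling loop in the snippets below: sort masks by area
    # (largest first), then paint incrementing integer labels so later
    # (smaller) masks overwrite the larger ones beneath them.
    order = sorted(masks, key=lambda m: m.sum(), reverse=True)
    label_map = np.zeros(hw, dtype=np.uint8)
    for label, mask in enumerate(order, start=1):
        label_map[mask] = label
    return label_map

h, w = 8, 8
big = np.zeros((h, w), dtype=bool);   big[1:7, 1:7] = True
small = np.zeros((h, w), dtype=bool); small[2:4, 2:4] = True
print(np.unique(build_label_map([small, big], (h, w))))   # -> [0 1 2]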
[ { "identifier": "interactive_seem_m2m_auto", "path": "task_adapter/seem/tasks/interactive_seem_m2m_auto.py", "snippet": "def interactive_seem_m2m_auto(model, image, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']):\n t = []\n t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC))\n transform1 = transforms.Compose(t)\n image_ori = transform1(image)\n\n image_ori = np.asarray(image_ori)\n images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()\n\n mask_generator = SeemAutomaticMaskGenerator(model)\n outputs = mask_generator.generate(images)\n\n from task_adapter.utils.visualizer import Visualizer\n visual = Visualizer(image_ori, metadata=metadata)\n sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True)\n label = 1\n for ann in sorted_anns:\n mask = ann['segmentation']\n color_mask = np.random.random((1, 3)).tolist()[0]\n # color_mask = [int(c*255) for c in color_mask]\n demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n label += 1\n im = demo.get_image()\n\n # fig=plt.figure(figsize=(10, 10))\n # plt.imshow(image_ori)\n # show_anns(outputs)\n # fig.canvas.draw()\n # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())\n return im" }, { "identifier": "inference_seem_pano", "path": "task_adapter/seem/tasks/inference_seem_pano.py", "snippet": "def inference_seem_pano(model, image, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']):\n t = []\n t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC))\n transform1 = transforms.Compose(t)\n image_ori = transform1(image)\n\n image_ori = np.asarray(image_ori)\n images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()\n\n orig_size = images.shape[-2:]\n orig_h, orig_w = orig_size\n crop_box = [0,0,orig_w,orig_h]\n\n data = {\"image\": images, \"height\": orig_h, \"width\": orig_w}\n batch_inputs = [data]\n\n model.model.metadata = metadata\n outputs = model.model.evaluate(batch_inputs)\n\n pano_mask = outputs[0]['panoptic_seg'][0]\n pano_info = outputs[0]['panoptic_seg'][1]\n\n masks = []\n for seg_info in pano_info:\n masks += [pano_mask == seg_info['id']]\n masks = torch.stack(masks, dim=0)\n iou_preds = torch.ones(masks.shape[0], dtype=torch.float32)\n points = torch.zeros((masks.shape[0], 2), dtype=torch.float32)\n\n mask_data = MaskData(\n masks=masks,\n iou_preds=iou_preds,\n points=points,\n )\n mask_data[\"stability_score\"] = torch.ones(masks.shape[0], dtype=torch.float32)\n del masks\n\n mask_data[\"boxes\"] = batched_mask_to_box(mask_data[\"masks\"])\n mask_data[\"crop_boxes\"] = torch.tensor([crop_box for _ in range(len(mask_data[\"boxes\"]))])\n\n # Compress to RLE\n mask_data[\"masks\"] = uncrop_masks(mask_data[\"masks\"], crop_box, orig_h, orig_w)\n mask_data[\"rles\"] = mask_to_rle_pytorch(mask_data[\"masks\"])\n del mask_data[\"masks\"]\n mask_data[\"segmentations\"] = [rle_to_mask(rle) for rle in mask_data[\"rles\"]]\n\n # Write mask records\n outputs = []\n for idx in range(len(mask_data[\"segmentations\"])):\n ann = {\n \"segmentation\": mask_data[\"segmentations\"][idx],\n \"area\": area_from_rle(mask_data[\"rles\"][idx]),\n \"bbox\": box_xyxy_to_xywh(mask_data[\"boxes\"][idx]).tolist(),\n \"predicted_iou\": mask_data[\"iou_preds\"][idx].item(),\n \"point_coords\": [mask_data[\"points\"][idx].tolist()],\n \"stability_score\": mask_data[\"stability_score\"][idx].item(),\n \"crop_box\": 
box_xyxy_to_xywh(mask_data[\"crop_boxes\"][idx]).tolist(),\n }\n outputs.append(ann)\n\n from task_adapter.utils.visualizer import Visualizer\n visual = Visualizer(image_ori, metadata=metadata)\n # create a full zero image as the image_orig\n sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True)\n label = 1\n mask_map = np.zeros(image_ori.shape, dtype=np.uint8) \n for i, ann in enumerate(sorted_anns):\n mask = ann['segmentation']\n color_mask = np.random.random((1, 3)).tolist()[0]\n # color_mask = [int(c*255) for c in color_mask]\n demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # assign the mask to the mask_map\n mask_map[mask == 1] = label\n label += 1\n im = demo.get_image()\n # fig=plt.figure(figsize=(10, 10))\n # plt.imshow(image_ori)\n # show_anns(outputs)\n # fig.canvas.draw()\n # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())\n return im, sorted_anns" }, { "identifier": "inference_seem_interactive", "path": "task_adapter/seem/tasks/inference_seem_interactive.py", "snippet": "def inference_seem_interactive(model, image, spatial_masks, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']):\n t = []\n t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC))\n transform1 = transforms.Compose(t)\n image_ori = transform1(image)\n\n image_ori = np.asarray(image_ori)\n images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()\n\n orig_size = images.shape[-2:]\n orig_h, orig_w = orig_size\n crop_box = [0,0,orig_w,orig_h]\n\n data = {\"image\": images, \"height\": orig_h, \"width\": orig_w}\n\n spatial_masks = spatial_masks[:, None].float().cuda()\n spatial_masks = F.interpolate(spatial_masks, size=(orig_h, orig_w), mode='bicubic', align_corners=False) > 0\n data['spatial_query'] = {'rand_shape': spatial_masks}\n\n model.model.metadata = metadata\n masks, _ = model.model.evaluate_demo([data])\n masks = masks > 0.0\n iou_preds = torch.ones(masks.shape[0], dtype=torch.float32)\n points = torch.zeros((masks.shape[0], 2), dtype=torch.float32)\n\n mask_data = MaskData(\n masks=masks,\n iou_preds=iou_preds,\n points=points,\n )\n\n mask_data[\"stability_score\"] = torch.ones(masks.shape[0], dtype=torch.float32)\n del masks\n\n mask_data[\"boxes\"] = batched_mask_to_box(mask_data[\"masks\"])\n mask_data[\"crop_boxes\"] = torch.tensor([crop_box for _ in range(len(mask_data[\"boxes\"]))])\n\n # Compress to RLE\n mask_data[\"masks\"] = uncrop_masks(mask_data[\"masks\"], crop_box, orig_h, orig_w)\n mask_data[\"rles\"] = mask_to_rle_pytorch(mask_data[\"masks\"])\n del mask_data[\"masks\"]\n mask_data[\"segmentations\"] = [rle_to_mask(rle) for rle in mask_data[\"rles\"]]\n\n # Write mask records\n outputs = []\n for idx in range(len(mask_data[\"segmentations\"])):\n ann = {\n \"segmentation\": mask_data[\"segmentations\"][idx],\n \"area\": area_from_rle(mask_data[\"rles\"][idx]),\n \"bbox\": box_xyxy_to_xywh(mask_data[\"boxes\"][idx]).tolist(),\n \"predicted_iou\": mask_data[\"iou_preds\"][idx].item(),\n \"point_coords\": [mask_data[\"points\"][idx].tolist()],\n \"stability_score\": mask_data[\"stability_score\"][idx].item(),\n \"crop_box\": box_xyxy_to_xywh(mask_data[\"crop_boxes\"][idx]).tolist(),\n }\n outputs.append(ann)\n\n from task_adapter.utils.visualizer import Visualizer\n visual = Visualizer(image_ori, metadata=metadata)\n sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True)\n label = 1\n # for ann in sorted_anns:\n # 
mask = ann['segmentation']\n # color_mask = np.random.random((1, 3)).tolist()[0]\n # # color_mask = [int(c*255) for c in color_mask]\n # demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # label += 1\n # im = demo.get_image()\n\n mask_map = np.zeros(image_ori.shape, dtype=np.uint8) \n for i, ann in enumerate(sorted_anns):\n mask = ann['segmentation']\n color_mask = np.random.random((1, 3)).tolist()[0]\n # color_mask = [int(c*255) for c in color_mask]\n demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # assign the mask to the mask_map\n mask_map[mask == 1] = label\n label += 1\n im = demo.get_image()\n # fig=plt.figure(figsize=(10, 10))\n # plt.imshow(image_ori)\n # show_anns(outputs)\n # fig.canvas.draw()\n # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())\n return im, sorted_anns" }, { "identifier": "inference_semsam_m2m_auto", "path": "task_adapter/semantic_sam/tasks/inference_semsam_m2m_auto.py", "snippet": "def inference_semsam_m2m_auto(model, image, level, all_classes, all_parts, thresh, text_size, hole_scale, island_scale, semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None, label_mode='1', alpha=0.1, anno_mode=['Mask']):\n t = []\n t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC))\n transform1 = transforms.Compose(t)\n image_ori = transform1(image)\n\n image_ori = np.asarray(image_ori)\n images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()\n\n mask_generator = SemanticSamAutomaticMaskGenerator(model,points_per_side=32,\n pred_iou_thresh=0.88,\n stability_score_thresh=0.92,\n min_mask_region_area=10,\n level=level,\n )\n outputs = mask_generator.generate(images)\n\n from task_adapter.utils.visualizer import Visualizer\n visual = Visualizer(image_ori, metadata=metadata)\n sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True)\n label = 1\n # for ann in sorted_anns:\n # mask = ann['segmentation']\n # color_mask = np.random.random((1, 3)).tolist()[0]\n # # color_mask = [int(c*255) for c in color_mask]\n # demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # label += 1\n # im = demo.get_image()\n\n mask_map = np.zeros(image_ori.shape, dtype=np.uint8) \n for i, ann in enumerate(sorted_anns):\n mask = ann['segmentation']\n color_mask = np.random.random((1, 3)).tolist()[0]\n # color_mask = [int(c*255) for c in color_mask]\n demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # assign the mask to the mask_map\n mask_map[mask == 1] = label\n label += 1\n im = demo.get_image() \n # fig=plt.figure(figsize=(10, 10))\n # plt.imshow(image_ori)\n # show_anns(outputs)\n # fig.canvas.draw()\n # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())\n return im, sorted_anns" }, { "identifier": "prompt_switch", "path": "task_adapter/semantic_sam/tasks/automatic_mask_generator.py", "snippet": "def prompt_switch(p):\n p = int(p)\n if p == 1:\n return 3\n if p == 2:\n return 2\n if p == 3:\n return 0\n if p == 4:\n return 4\n if p == 5:\n return 1\n if p == 6:\n return 5\n else:\n raise NotImplementedError" }, { "identifier": "inference_sam_m2m_auto", "path": "task_adapter/sam/tasks/inference_sam_m2m_auto.py", "snippet": "def inference_sam_m2m_auto(model, image, text_size, 
label_mode='1', alpha=0.1, anno_mode=['Mask']):\n t = []\n t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC))\n transform1 = transforms.Compose(t)\n image_ori = transform1(image)\n image_ori = np.asarray(image_ori)\n\n mask_generator = SamAutomaticMaskGenerator(model)\n outputs = mask_generator.generate(image_ori)\n\n from task_adapter.utils.visualizer import Visualizer\n visual = Visualizer(image_ori, metadata=metadata)\n sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True)\n label = 1\n # for ann in sorted_anns:\n # mask = ann['segmentation']\n # color_mask = np.random.random((1, 3)).tolist()[0]\n # # color_mask = [int(c*255) for c in color_mask]\n # demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # label += 1\n # im = demo.get_image()\n\n mask_map = np.zeros(image_ori.shape, dtype=np.uint8) \n for i, ann in enumerate(sorted_anns):\n mask = ann['segmentation']\n color_mask = np.random.random((1, 3)).tolist()[0]\n # color_mask = [int(c*255) for c in color_mask]\n demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # assign the mask to the mask_map\n mask_map[mask == 1] = label\n label += 1\n im = demo.get_image() \n # fig=plt.figure(figsize=(10, 10))\n # plt.imshow(image_ori)\n # show_anns(outputs)\n # fig.canvas.draw()\n # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())\n return im, sorted_anns" }, { "identifier": "inference_sam_m2m_interactive", "path": "task_adapter/sam/tasks/inference_sam_m2m_interactive.py", "snippet": "def inference_sam_m2m_interactive(model, image, spatial_masks, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']):\n t = []\n t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC))\n transform1 = transforms.Compose(t)\n image_ori = transform1(image)\n\n image_ori = np.asarray(image_ori)\n images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()\n\n orig_size = images.shape[-2:]\n orig_h, orig_w = orig_size\n crop_box = [0,0,orig_w,orig_h]\n\n spatial_masks = spatial_masks[:, None].float().cuda()\n spatial_masks = F.interpolate(spatial_masks, size=(orig_h, orig_w), mode='bicubic', align_corners=False) > 0\n\n # generate single center point\n # n,_,h,w = spatial_masks.shape\n # mask_dt = (distance_transform((~F.pad(spatial_masks, pad=(1, 1, 1, 1), mode='constant', value=0)).float())[:,:,1:-1,1:-1]).reshape(n,-1)\n # max_xy_idx = torch.stack([torch.arange(n), mask_dt.max(dim=-1)[1].cpu()]).tolist()\n # next_mask = torch.zeros(spatial_masks.shape, device=torch.cuda.current_device()).bool()\n # next_mask = next_mask.view(n,-1)\n # next_mask[max_xy_idx] = True\n # next_mask = next_mask.reshape((n,1,h,w))\n # points = next_mask.nonzero()[:,2:].flip(dims=[1]).cpu().numpy()\n\n # stack sampled points\n acc_points = []\n for i in range(len(spatial_masks)):\n points = spatial_masks[i:i+1].nonzero()[:,2:].flip(dims=[1]).cpu().numpy()\n rand_ids = np.random.choice(points.shape[0], size=40, replace=True)\n points = points[rand_ids]\n acc_points.append(points)\n _np = len(acc_points)\n points = np.concatenate(acc_points)\n\n mask_generator = SamAutomaticMaskGenerator(model)\n mask_generator.predictor.set_image(image_ori)\n im_size = image_ori.shape[:-1]\n\n transformed_points = mask_generator.predictor.transform.apply_coords(points, im_size)\n in_points = torch.as_tensor(transformed_points, 
device=mask_generator.predictor.device).reshape(_np,-1,2).transpose(0,1)\n in_labels = torch.ones((in_points.shape[0], _np), dtype=torch.int, device=mask_generator.predictor.device)\n\n masks = sam_interactive_mask(mask_generator, points, in_points.transpose(0,1), in_labels.transpose(0,1), None)\n\n masks = masks > 0.0\n iou_preds = torch.ones(masks.shape[0], dtype=torch.float32)\n points = torch.zeros((masks.shape[0], 2), dtype=torch.float32)\n\n mask_data = MaskData(\n masks=masks,\n iou_preds=iou_preds,\n points=points,\n )\n\n mask_data[\"stability_score\"] = torch.ones(masks.shape[0], dtype=torch.float32)\n del masks\n\n mask_data[\"boxes\"] = batched_mask_to_box(mask_data[\"masks\"])\n mask_data[\"crop_boxes\"] = torch.tensor([crop_box for _ in range(len(mask_data[\"boxes\"]))])\n\n # Compress to RLE\n mask_data[\"masks\"] = uncrop_masks(mask_data[\"masks\"], crop_box, orig_h, orig_w)\n mask_data[\"rles\"] = mask_to_rle_pytorch(mask_data[\"masks\"])\n del mask_data[\"masks\"]\n mask_data[\"segmentations\"] = [rle_to_mask(rle) for rle in mask_data[\"rles\"]]\n\n # Write mask records\n outputs = []\n for idx in range(len(mask_data[\"segmentations\"])):\n ann = {\n \"segmentation\": mask_data[\"segmentations\"][idx],\n \"area\": area_from_rle(mask_data[\"rles\"][idx]),\n \"bbox\": box_xyxy_to_xywh(mask_data[\"boxes\"][idx]).tolist(),\n \"predicted_iou\": mask_data[\"iou_preds\"][idx].item(),\n \"point_coords\": [mask_data[\"points\"][idx].tolist()],\n \"stability_score\": mask_data[\"stability_score\"][idx].item(),\n \"crop_box\": box_xyxy_to_xywh(mask_data[\"crop_boxes\"][idx]).tolist(),\n }\n outputs.append(ann)\n\n from task_adapter.utils.visualizer import Visualizer\n visual = Visualizer(image_ori, metadata=metadata)\n sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True)\n label = 1\n # for ann in sorted_anns:\n # mask = ann['segmentation']\n # demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # label += 1\n # im = demo.get_image()\n\n mask_map = np.zeros(image_ori.shape, dtype=np.uint8) \n for i, ann in enumerate(sorted_anns):\n mask = ann['segmentation']\n color_mask = np.random.random((1, 3)).tolist()[0]\n # color_mask = [int(c*255) for c in color_mask]\n demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # assign the mask to the mask_map\n mask_map[mask == 1] = label\n label += 1\n im = demo.get_image() \n # fig=plt.figure(figsize=(10, 10))\n # plt.imshow(image_ori)\n # show_anns(outputs)\n # fig.canvas.draw()\n # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())\n return im, sorted_anns" }, { "identifier": "Visualizer", "path": "task_adapter/utils/visualizer.py", "snippet": "class Visualizer:\n \"\"\"\n Visualizer that draws data about detection/segmentation on images.\n\n It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`\n that draw primitive objects to images, as well as high-level wrappers like\n `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`\n that draw composite data in some pre-defined style.\n\n Note that the exact visualization style for the high-level wrappers are subject to change.\n Style such as color, opacity, label contents, visibility of labels, or even the visibility\n of objects themselves (e.g. 
when the object is too small) may change according\n to different heuristics, as long as the results still look visually reasonable.\n\n To obtain a consistent style, you can implement custom drawing functions with the\n abovementioned primitive methods instead. If you need more customized visualization\n styles, you can process the data yourself following their format documented in\n tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not\n intend to satisfy everyone's preference on drawing styles.\n\n This visualizer focuses on high rendering quality rather than performance. It is not\n designed to be used for real-time applications.\n \"\"\"\n\n # TODO implement a fast, rasterized version using OpenCV\n\n def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):\n \"\"\"\n Args:\n img_rgb: a numpy array of shape (H, W, C), where H and W correspond to\n the height and width of the image respectively. C is the number of\n color channels. The image is required to be in RGB format since that\n is a requirement of the Matplotlib library. The image is also expected\n to be in the range [0, 255].\n metadata (Metadata): dataset metadata (e.g. class names and colors)\n instance_mode (ColorMode): defines one of the pre-defined style for drawing\n instances on an image.\n \"\"\"\n self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)\n if metadata is None:\n metadata = MetadataCatalog.get(\"__nonexist__\")\n self.metadata = metadata\n self.output = VisImage(self.img, scale=scale)\n self.cpu_device = torch.device(\"cpu\")\n\n # too small texts are useless, therefore clamp to 9\n self._default_font_size = max(\n np.sqrt(self.output.height * self.output.width) // 90, 10 // scale\n )\n self._default_font_size = 18\n self._instance_mode = instance_mode\n self.keypoint_threshold = _KEYPOINT_THRESHOLD\n\n import matplotlib.colors as mcolors\n css4_colors = mcolors.CSS4_COLORS\n self.color_proposals = [list(mcolors.hex2color(color)) for color in css4_colors.values()]\n\n def draw_instance_predictions(self, predictions):\n \"\"\"\n Draw instance-level prediction results on an image.\n\n Args:\n predictions (Instances): the output of an instance detection/segmentation\n model. 
Following fields will be used to draw:\n \"pred_boxes\", \"pred_classes\", \"scores\", \"pred_masks\" (or \"pred_masks_rle\").\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n boxes = predictions.pred_boxes if predictions.has(\"pred_boxes\") else None\n scores = predictions.scores if predictions.has(\"scores\") else None\n classes = predictions.pred_classes.tolist() if predictions.has(\"pred_classes\") else None\n labels = _create_text_labels(classes, scores, self.metadata.get(\"thing_classes\", None))\n keypoints = predictions.pred_keypoints if predictions.has(\"pred_keypoints\") else None\n\n keep = (scores > 0.5).cpu()\n boxes = boxes[keep]\n scores = scores[keep]\n classes = np.array(classes)\n classes = classes[np.array(keep)]\n labels = np.array(labels)\n labels = labels[np.array(keep)]\n\n if predictions.has(\"pred_masks\"):\n masks = np.asarray(predictions.pred_masks)\n masks = masks[np.array(keep)]\n masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]\n else:\n masks = None\n\n if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(\"thing_colors\"):\n # if self.metadata.get(\"thing_colors\"):\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes\n ]\n alpha = 0.4\n else:\n colors = None\n alpha = 0.4\n\n if self._instance_mode == ColorMode.IMAGE_BW:\n self.output.reset_image(\n self._create_grayscale_image(\n (predictions.pred_masks.any(dim=0) > 0).numpy()\n if predictions.has(\"pred_masks\")\n else None\n )\n )\n alpha = 0.3\n \n self.overlay_instances(\n masks=masks,\n boxes=boxes,\n labels=labels,\n keypoints=keypoints,\n assigned_colors=colors,\n alpha=alpha,\n )\n return self.output\n\n def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.7):\n \"\"\"\n Draw semantic segmentation predictions/labels.\n\n Args:\n sem_seg (Tensor or ndarray): the segmentation of shape (H, W).\n Each value is the integer label of the pixel.\n area_threshold (int): segments with less than `area_threshold` are not drawn.\n alpha (float): the larger it is, the more opaque the segmentations are.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n if isinstance(sem_seg, torch.Tensor):\n sem_seg = sem_seg.numpy()\n labels, areas = np.unique(sem_seg, return_counts=True)\n sorted_idxs = np.argsort(-areas).tolist()\n labels = labels[sorted_idxs]\n for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):\n try:\n mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]\n except (AttributeError, IndexError):\n mask_color = None\n\n binary_mask = (sem_seg == label).astype(np.uint8)\n text = self.metadata.stuff_classes[label]\n self.draw_binary_mask(\n binary_mask,\n color=mask_color,\n edge_color=_OFF_WHITE,\n text=text,\n alpha=alpha,\n area_threshold=area_threshold,\n )\n return self.output\n\n def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):\n \"\"\"\n Draw panoptic prediction annotations or results.\n\n Args:\n panoptic_seg (Tensor): of shape (height, width) where the values are ids for each\n segment.\n segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.\n If it is a ``list[dict]``, each dict contains keys \"id\", \"category_id\".\n If None, category id of each pixel is computed by\n ``pixel // metadata.label_divisor``.\n area_threshold (int): stuff segments with less than `area_threshold` are not drawn.\n\n Returns:\n output (VisImage): image object with 
visualizations.\n \"\"\"\n pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)\n\n if self._instance_mode == ColorMode.IMAGE_BW:\n self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask()))\n\n # draw mask for all semantic segments first i.e. \"stuff\"\n for mask, sinfo in pred.semantic_masks():\n category_idx = sinfo[\"category_id\"]\n try:\n mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]\n except AttributeError:\n mask_color = None\n\n text = self.metadata.stuff_classes[category_idx].replace('-other','').replace('-merged','')\n self.draw_binary_mask(\n mask,\n color=mask_color,\n edge_color=_OFF_WHITE,\n text=text,\n alpha=alpha,\n area_threshold=area_threshold,\n )\n\n # draw mask for all instances second\n all_instances = list(pred.instance_masks())\n if len(all_instances) == 0:\n return self.output\n masks, sinfo = list(zip(*all_instances))\n category_ids = [x[\"category_id\"] for x in sinfo]\n\n try:\n scores = [x[\"score\"] for x in sinfo]\n except KeyError:\n scores = None\n class_names = [name.replace('-other','').replace('-merged','') for name in self.metadata.thing_classes]\n labels = _create_text_labels(\n category_ids, scores, class_names, [x.get(\"iscrowd\", 0) for x in sinfo]\n )\n\n try:\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids\n ]\n except AttributeError:\n colors = None\n self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)\n\n return self.output\n\n draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility\n\n def draw_dataset_dict(self, dic):\n \"\"\"\n Draw annotations/segmentaions in Detectron2 Dataset format.\n\n Args:\n dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n annos = dic.get(\"annotations\", None)\n if annos:\n if \"segmentation\" in annos[0]:\n masks = [x[\"segmentation\"] for x in annos]\n else:\n masks = None\n if \"keypoints\" in annos[0]:\n keypts = [x[\"keypoints\"] for x in annos]\n keypts = np.array(keypts).reshape(len(annos), -1, 3)\n else:\n keypts = None\n\n boxes = [\n BoxMode.convert(x[\"bbox\"], x[\"bbox_mode\"], BoxMode.XYXY_ABS)\n if len(x[\"bbox\"]) == 4\n else x[\"bbox\"]\n for x in annos\n ]\n\n colors = None\n category_ids = [x[\"category_id\"] for x in annos]\n if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(\"thing_colors\"):\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]])\n for c in category_ids\n ]\n names = self.metadata.get(\"thing_classes\", None)\n labels = _create_text_labels(\n category_ids,\n scores=None,\n class_names=names,\n is_crowd=[x.get(\"iscrowd\", 0) for x in annos],\n )\n self.overlay_instances(\n labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors\n )\n\n sem_seg = dic.get(\"sem_seg\", None)\n if sem_seg is None and \"sem_seg_file_name\" in dic:\n with PathManager.open(dic[\"sem_seg_file_name\"], \"rb\") as f:\n sem_seg = Image.open(f)\n sem_seg = np.asarray(sem_seg, dtype=\"uint8\")\n if sem_seg is not None:\n self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.4)\n\n pan_seg = dic.get(\"pan_seg\", None)\n if pan_seg is None and \"pan_seg_file_name\" in dic:\n with PathManager.open(dic[\"pan_seg_file_name\"], \"rb\") as f:\n pan_seg = Image.open(f)\n pan_seg = np.asarray(pan_seg)\n from panopticapi.utils import rgb2id\n\n pan_seg = rgb2id(pan_seg)\n 
if pan_seg is not None:\n segments_info = dic[\"segments_info\"]\n pan_seg = torch.tensor(pan_seg)\n self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.7)\n return self.output\n\n def overlay_instances(\n self,\n *,\n boxes=None,\n labels=None,\n masks=None,\n keypoints=None,\n assigned_colors=None,\n alpha=0.5,\n ):\n \"\"\"\n Args:\n boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,\n or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,\n or a :class:`RotatedBoxes`,\n or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format\n for the N objects in a single image,\n labels (list[str]): the text to be displayed for each instance.\n masks (masks-like object): Supported types are:\n\n * :class:`detectron2.structures.PolygonMasks`,\n :class:`detectron2.structures.BitMasks`.\n * list[list[ndarray]]: contains the segmentation masks for all objects in one image.\n The first level of the list corresponds to individual instances. The second\n level to all the polygon that compose the instance, and the third level\n to the polygon coordinates. The third level should have the format of\n [x0, y0, x1, y1, ..., xn, yn] (n >= 3).\n * list[ndarray]: each ndarray is a binary mask of shape (H, W).\n * list[dict]: each dict is a COCO-style RLE.\n keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),\n where the N is the number of instances and K is the number of keypoints.\n The last dimension corresponds to (x, y, visibility or score).\n assigned_colors (list[matplotlib.colors]): a list of colors, where each color\n corresponds to each mask or box in the image. Refer to 'matplotlib.colors'\n for full list of formats that the colors are accepted in.\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n num_instances = 0\n if boxes is not None:\n boxes = self._convert_boxes(boxes)\n num_instances = len(boxes)\n if masks is not None:\n masks = self._convert_masks(masks)\n if num_instances:\n assert len(masks) == num_instances\n else:\n num_instances = len(masks)\n if keypoints is not None:\n if num_instances:\n assert len(keypoints) == num_instances\n else:\n num_instances = len(keypoints)\n keypoints = self._convert_keypoints(keypoints)\n if labels is not None:\n assert len(labels) == num_instances\n if assigned_colors is None:\n assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]\n if num_instances == 0:\n return self.output\n if boxes is not None and boxes.shape[1] == 5:\n return self.overlay_rotated_instances(\n boxes=boxes, labels=labels, assigned_colors=assigned_colors\n )\n\n # Display in largest to smallest order to reduce occlusion.\n areas = None\n if boxes is not None:\n areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)\n elif masks is not None:\n areas = np.asarray([x.area() for x in masks])\n\n if areas is not None:\n sorted_idxs = np.argsort(-areas).tolist()\n # Re-order overlapped instances in descending order.\n boxes = boxes[sorted_idxs] if boxes is not None else None\n labels = [labels[k] for k in sorted_idxs] if labels is not None else None\n masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None\n assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]\n keypoints = keypoints[sorted_idxs] if keypoints is not None else None\n\n for i in range(num_instances):\n color = assigned_colors[i]\n if boxes is not None:\n self.draw_box(boxes[i], edge_color=color)\n\n if masks is not None:\n for segment in 
masks[i].polygons:\n self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)\n\n if labels is not None:\n # first get a box\n if boxes is not None:\n x0, y0, x1, y1 = boxes[i]\n text_pos = (x0, y0) # if drawing boxes, put text on the box corner.\n horiz_align = \"left\"\n elif masks is not None:\n # skip small mask without polygon\n if len(masks[i].polygons) == 0:\n continue\n\n x0, y0, x1, y1 = masks[i].bbox()\n\n # draw text in the center (defined by median) when box is not drawn\n # median is less sensitive to outliers.\n text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]\n horiz_align = \"center\"\n else:\n continue # drawing the box confidence for keypoints isn't very useful.\n # for small objects, draw text at the side to avoid occlusion\n instance_area = (y1 - y0) * (x1 - x0)\n if (\n instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale\n or y1 - y0 < 40 * self.output.scale\n ):\n if y1 >= self.output.height - 5:\n text_pos = (x1, y0)\n else:\n text_pos = (x0, y1)\n\n height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n font_size = (\n np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)\n * 0.5\n * self._default_font_size\n )\n self.draw_text(\n labels[i],\n text_pos,\n color=lighter_color,\n horizontal_alignment=horiz_align,\n font_size=font_size,\n )\n\n # draw keypoints\n if keypoints is not None:\n for keypoints_per_instance in keypoints:\n self.draw_and_connect_keypoints(keypoints_per_instance)\n\n return self.output\n\n def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):\n \"\"\"\n Args:\n boxes (ndarray): an Nx5 numpy array of\n (x_center, y_center, width, height, angle_degrees) format\n for the N objects in a single image.\n labels (list[str]): the text to be displayed for each instance.\n assigned_colors (list[matplotlib.colors]): a list of colors, where each color\n corresponds to each mask or box in the image. Refer to 'matplotlib.colors'\n for full list of formats that the colors are accepted in.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n num_instances = len(boxes)\n\n if assigned_colors is None:\n assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]\n if num_instances == 0:\n return self.output\n\n # Display in largest to smallest order to reduce occlusion.\n if boxes is not None:\n areas = boxes[:, 2] * boxes[:, 3]\n\n sorted_idxs = np.argsort(-areas).tolist()\n # Re-order overlapped instances in descending order.\n boxes = boxes[sorted_idxs]\n labels = [labels[k] for k in sorted_idxs] if labels is not None else None\n colors = [assigned_colors[idx] for idx in sorted_idxs]\n\n for i in range(num_instances):\n self.draw_rotated_box_with_label(\n boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None\n )\n\n return self.output\n\n def draw_and_connect_keypoints(self, keypoints):\n \"\"\"\n Draws keypoints of an instance and follows the rules for keypoint connections\n to draw lines between appropriate keypoints. 
This follows color heuristics for\n line color.\n\n Args:\n keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints\n and the last dimension corresponds to (x, y, probability).\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n visible = {}\n keypoint_names = self.metadata.get(\"keypoint_names\")\n for idx, keypoint in enumerate(keypoints):\n\n # draw keypoint\n x, y, prob = keypoint\n if prob > self.keypoint_threshold:\n self.draw_circle((x, y), color=_RED)\n if keypoint_names:\n keypoint_name = keypoint_names[idx]\n visible[keypoint_name] = (x, y)\n\n if self.metadata.get(\"keypoint_connection_rules\"):\n for kp0, kp1, color in self.metadata.keypoint_connection_rules:\n if kp0 in visible and kp1 in visible:\n x0, y0 = visible[kp0]\n x1, y1 = visible[kp1]\n color = tuple(x / 255.0 for x in color)\n self.draw_line([x0, x1], [y0, y1], color=color)\n\n # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip\n # Note that this strategy is specific to person keypoints.\n # For other keypoints, it should just do nothing\n try:\n ls_x, ls_y = visible[\"left_shoulder\"]\n rs_x, rs_y = visible[\"right_shoulder\"]\n mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2\n except KeyError:\n pass\n else:\n # draw line from nose to mid-shoulder\n nose_x, nose_y = visible.get(\"nose\", (None, None))\n if nose_x is not None:\n self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)\n\n try:\n # draw line from mid-shoulder to mid-hip\n lh_x, lh_y = visible[\"left_hip\"]\n rh_x, rh_y = visible[\"right_hip\"]\n except KeyError:\n pass\n else:\n mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2\n self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)\n return self.output\n\n \"\"\"\n Primitive drawing functions:\n \"\"\"\n\n def draw_text(\n self,\n text,\n position,\n *,\n font_size=None,\n color=\"g\",\n horizontal_alignment=\"center\",\n rotation=0,\n ):\n \"\"\"\n Args:\n text (str): class label\n position (tuple): a tuple of the x and y coordinates to place text on image.\n font_size (int, optional): font of the text. If not provided, a font size\n proportional to the image width is calculated and used.\n color: color of the text. Refer to `matplotlib.colors` for full list\n of formats that are accepted.\n horizontal_alignment (str): see `matplotlib.text.Text`\n rotation: rotation angle in degrees CCW\n\n Returns:\n output (VisImage): image object with text drawn.\n \"\"\"\n if not font_size:\n font_size = self._default_font_size\n\n # since the text background is dark, we don't want the text to be dark\n color = np.maximum(list(mplc.to_rgb(color)), 0.15)\n color[np.argmax(color)] = max(0.8, np.max(color))\n\n def contrasting_color(rgb):\n \"\"\"Returns 'white' or 'black' depending on which color contrasts more with the given RGB value.\"\"\"\n \n # Decompose the RGB tuple\n R, G, B = rgb\n\n # Calculate the Y value\n Y = 0.299 * R + 0.587 * G + 0.114 * B\n\n # If Y value is greater than 128, it's closer to white so return black. 
Otherwise, return white.\n return 'black' if Y > 128 else 'white'\n\n bbox_background = contrasting_color(color*255)\n\n x, y = position\n self.output.ax.text(\n x,\n y,\n text,\n size=font_size * self.output.scale,\n family=\"sans-serif\",\n bbox={\"facecolor\": bbox_background, \"alpha\": 0.8, \"pad\": 0.7, \"edgecolor\": \"none\"},\n verticalalignment=\"top\",\n horizontalalignment=horizontal_alignment,\n color=color,\n zorder=10,\n rotation=rotation,\n )\n return self.output\n\n def draw_box(self, box_coord, alpha=0.5, edge_color=\"g\", line_style=\"-\"):\n \"\"\"\n Args:\n box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0\n are the coordinates of the image's top left corner. x1 and y1 are the\n coordinates of the image's bottom right corner.\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n edge_color: color of the outline of the box. Refer to `matplotlib.colors`\n for full list of formats that are accepted.\n line_style (string): the string to use to create the outline of the boxes.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n x0, y0, x1, y1 = box_coord\n width = x1 - x0\n height = y1 - y0\n\n linewidth = max(self._default_font_size / 12, 1)\n\n self.output.ax.add_patch(\n mpl.patches.Rectangle(\n (x0, y0),\n width,\n height,\n fill=False,\n edgecolor=edge_color,\n linewidth=linewidth * self.output.scale,\n alpha=alpha,\n linestyle=line_style,\n )\n )\n return self.output\n\n def draw_rotated_box_with_label(\n self, rotated_box, alpha=0.5, edge_color=\"g\", line_style=\"-\", label=None\n ):\n \"\"\"\n Draw a rotated box with label on its top-left corner.\n\n Args:\n rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),\n where cnt_x and cnt_y are the center coordinates of the box.\n w and h are the width and height of the box. angle represents how\n many degrees the box is rotated CCW with regard to the 0-degree box.\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n edge_color: color of the outline of the box. Refer to `matplotlib.colors`\n for full list of formats that are accepted.\n line_style (string): the string to use to create the outline of the boxes.\n label (string): label for rotated box. 
It will not be rendered when set to None.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n cnt_x, cnt_y, w, h, angle = rotated_box\n area = w * h\n # use thinner lines when the box is small\n linewidth = self._default_font_size / (\n 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3\n )\n\n theta = angle * math.pi / 180.0\n c = math.cos(theta)\n s = math.sin(theta)\n rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]\n # x: left->right ; y: top->down\n rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]\n for k in range(4):\n j = (k + 1) % 4\n self.draw_line(\n [rotated_rect[k][0], rotated_rect[j][0]],\n [rotated_rect[k][1], rotated_rect[j][1]],\n color=edge_color,\n linestyle=\"--\" if k == 1 else line_style,\n linewidth=linewidth,\n )\n\n if label is not None:\n text_pos = rotated_rect[1] # topleft corner\n\n height_ratio = h / np.sqrt(self.output.height * self.output.width)\n label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)\n font_size = (\n np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size\n )\n self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)\n\n return self.output\n\n def draw_circle(self, circle_coord, color, radius=3):\n \"\"\"\n Args:\n circle_coord (list(int) or tuple(int)): contains the x and y coordinates\n of the center of the circle.\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n radius (int): radius of the circle.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n x, y = circle_coord\n self.output.ax.add_patch(\n mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)\n )\n return self.output\n\n def draw_line(self, x_data, y_data, color, linestyle=\"-\", linewidth=None):\n \"\"\"\n Args:\n x_data (list[int]): a list containing x values of all the points being drawn.\n Length of list should match the length of y_data.\n y_data (list[int]): a list containing y values of all the points being drawn.\n Length of list should match the length of x_data.\n color: color of the line. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n linestyle: style of the line. Refer to `matplotlib.lines.Line2D`\n for a full list of formats that are accepted.\n linewidth (float or None): width of the line. When it's None,\n a default value will be computed and used.\n\n Returns:\n output (VisImage): image object with line drawn.\n \"\"\"\n if linewidth is None:\n linewidth = self._default_font_size / 3\n linewidth = max(linewidth, 1)\n self.output.ax.add_line(\n mpl.lines.Line2D(\n x_data,\n y_data,\n linewidth=linewidth * self.output.scale,\n color=color,\n linestyle=linestyle,\n )\n )\n return self.output\n\n def draw_binary_mask(\n self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.7, area_threshold=10\n ):\n \"\"\"\n Args:\n binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and\n W is the image width. Each value in the array is either a 0 or 1 value of uint8\n type.\n color: color of the mask. Refer to `matplotlib.colors` for a full list of\n formats that are accepted. If None, will pick a random color.\n edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a\n full list of formats that are accepted.\n text (str): if None, will be drawn on the object\n alpha (float): blending efficient. 
Smaller values lead to more transparent masks.\n area_threshold (float): a connected component smaller than this area will not be shown.\n\n Returns:\n output (VisImage): image object with mask drawn.\n \"\"\"\n if color is None:\n color = random_color(rgb=True, maximum=1)\n color = mplc.to_rgb(color)\n\n has_valid_segment = False\n binary_mask = binary_mask.astype(\"uint8\") # opencv needs uint8\n mask = GenericMask(binary_mask, self.output.height, self.output.width)\n shape2d = (binary_mask.shape[0], binary_mask.shape[1])\n\n if not mask.has_holes:\n # draw polygons for regular masks\n for segment in mask.polygons:\n area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))\n if area < (area_threshold or 0):\n continue\n has_valid_segment = True\n segment = segment.reshape(-1, 2)\n self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)\n else:\n # TODO: Use Path/PathPatch to draw vector graphics:\n # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon\n rgba = np.zeros(shape2d + (4,), dtype=\"float32\")\n rgba[:, :, :3] = color\n rgba[:, :, 3] = (mask.mask == 1).astype(\"float32\") * alpha\n has_valid_segment = True\n self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))\n\n if text is not None and has_valid_segment:\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n self._draw_text_in_mask(binary_mask, text, lighter_color)\n return self.output\n \n def draw_binary_mask_with_number(\n self, binary_mask, color=None, *, edge_color=None, text=None, label_mode='1', alpha=0.1, anno_mode=['Mask'], area_threshold=10\n ):\n \"\"\"\n Args:\n binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and\n W is the image width. Each value in the array is either a 0 or 1 value of uint8\n type.\n color: color of the mask. Refer to `matplotlib.colors` for a full list of\n formats that are accepted. If None, will pick a random color.\n edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a\n full list of formats that are accepted.\n text (str): if None, will be drawn on the object\n alpha (float): blending efficient. 
Smaller values lead to more transparent masks.\n area_threshold (float): a connected component smaller than this area will not be shown.\n\n Returns:\n output (VisImage): image object with mask drawn.\n \"\"\"\n if color is None:\n randint = random.randint(0, len(self.color_proposals)-1)\n color = self.color_proposals[randint]\n color = mplc.to_rgb(color)\n\n has_valid_segment = True\n binary_mask = binary_mask.astype(\"uint8\") # opencv needs uint8\n mask = GenericMask(binary_mask, self.output.height, self.output.width)\n shape2d = (binary_mask.shape[0], binary_mask.shape[1])\n bbox = mask.bbox()\n\n if 'Mask' in anno_mode:\n if not mask.has_holes:\n # draw polygons for regular masks\n for segment in mask.polygons:\n area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))\n if area < (area_threshold or 0):\n continue\n has_valid_segment = True\n segment = segment.reshape(-1, 2)\n self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)\n else:\n # TODO: Use Path/PathPatch to draw vector graphics:\n # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon\n rgba = np.zeros(shape2d + (4,), dtype=\"float32\")\n rgba[:, :, :3] = color\n rgba[:, :, 3] = (mask.mask == 1).astype(\"float32\") * alpha\n has_valid_segment = True\n self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))\n\n if 'Box' in anno_mode:\n self.draw_box(bbox, edge_color=color, alpha=0.75)\n\n if 'Mark' in anno_mode:\n has_valid_segment = True\n else:\n has_valid_segment = False\n\n if text is not None and has_valid_segment:\n # lighter_color = tuple([x*0.2 for x in color])\n lighter_color = [1,1,1] # self._change_color_brightness(color, brightness_factor=0.7)\n self._draw_number_in_mask(binary_mask, text, lighter_color, label_mode)\n return self.output\n\n def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5):\n \"\"\"\n Args:\n soft_mask (ndarray): float array of shape (H, W), each value in [0, 1].\n color: color of the mask. Refer to `matplotlib.colors` for a full list of\n formats that are accepted. If None, will pick a random color.\n text (str): if None, will be drawn on the object\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n\n Returns:\n output (VisImage): image object with mask drawn.\n \"\"\"\n if color is None:\n color = random_color(rgb=True, maximum=1)\n color = mplc.to_rgb(color)\n\n shape2d = (soft_mask.shape[0], soft_mask.shape[1])\n rgba = np.zeros(shape2d + (4,), dtype=\"float32\")\n rgba[:, :, :3] = color\n rgba[:, :, 3] = soft_mask * alpha\n self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))\n\n if text is not None:\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n binary_mask = (soft_mask > 0.5).astype(\"uint8\")\n self._draw_text_in_mask(binary_mask, text, lighter_color)\n return self.output\n\n def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):\n \"\"\"\n Args:\n segment: numpy array of shape Nx2, containing all the points in the polygon.\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a\n full list of formats that are accepted. If not provided, a darker shade\n of the polygon color will be used instead.\n alpha (float): blending efficient. 
Smaller values lead to more transparent masks.\n\n Returns:\n output (VisImage): image object with polygon drawn.\n \"\"\"\n if edge_color is None:\n # make edge color darker than the polygon color\n if alpha > 0.8:\n edge_color = self._change_color_brightness(color, brightness_factor=-0.7)\n else:\n edge_color = color\n edge_color = mplc.to_rgb(edge_color) + (1,)\n\n polygon = mpl.patches.Polygon(\n segment,\n fill=True,\n facecolor=mplc.to_rgb(color) + (alpha,),\n edgecolor=edge_color,\n linewidth=max(self._default_font_size // 15 * self.output.scale, 1),\n )\n self.output.ax.add_patch(polygon)\n return self.output\n\n \"\"\"\n Internal methods:\n \"\"\"\n\n def _jitter(self, color):\n \"\"\"\n Randomly modifies given color to produce a slightly different color than the color given.\n\n Args:\n color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color\n picked. The values in the list are in the [0.0, 1.0] range.\n\n Returns:\n jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the\n color after being jittered. The values in the list are in the [0.0, 1.0] range.\n \"\"\"\n color = mplc.to_rgb(color)\n # np.random.seed(0)\n vec = np.random.rand(3)\n # better to do it in another color space\n vec = vec / np.linalg.norm(vec) * 0.5\n res = np.clip(vec + color, 0, 1)\n return tuple(res)\n\n def _create_grayscale_image(self, mask=None):\n \"\"\"\n Create a grayscale version of the original image.\n The colors in masked area, if given, will be kept.\n \"\"\"\n img_bw = self.img.astype(\"f4\").mean(axis=2)\n img_bw = np.stack([img_bw] * 3, axis=2)\n if mask is not None:\n img_bw[mask] = self.img[mask]\n return img_bw\n\n def _change_color_brightness(self, color, brightness_factor):\n \"\"\"\n Depending on the brightness_factor, gives a lighter or darker color i.e. a color with\n less or more saturation than the original color.\n\n Args:\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of\n 0 will correspond to no change, a factor in [-1.0, 0) range will result in\n a darker color and a factor in (0, 1.0] range will result in a lighter color.\n\n Returns:\n modified_color (tuple[double]): a tuple containing the RGB values of the\n modified color. 
Each value in the tuple is in the [0.0, 1.0] range.\n \"\"\"\n assert brightness_factor >= -1.0 and brightness_factor <= 1.0\n color = mplc.to_rgb(color)\n polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))\n modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])\n modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness\n modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness\n modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])\n return modified_color\n\n def _convert_boxes(self, boxes):\n \"\"\"\n Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.\n \"\"\"\n if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):\n return boxes.tensor.detach().numpy()\n else:\n return np.asarray(boxes)\n\n def _convert_masks(self, masks_or_polygons):\n \"\"\"\n Convert different format of masks or polygons to a tuple of masks and polygons.\n\n Returns:\n list[GenericMask]:\n \"\"\"\n\n m = masks_or_polygons\n if isinstance(m, PolygonMasks):\n m = m.polygons\n if isinstance(m, BitMasks):\n m = m.tensor.numpy()\n if isinstance(m, torch.Tensor):\n m = m.numpy()\n ret = []\n for x in m:\n if isinstance(x, GenericMask):\n ret.append(x)\n else:\n ret.append(GenericMask(x, self.output.height, self.output.width))\n return ret\n\n def _draw_number_in_mask(self, binary_mask, text, color, label_mode='1'):\n \"\"\"\n Find proper places to draw text given a binary mask.\n \"\"\"\n\n def number_to_string(n):\n chars = []\n while n:\n n, remainder = divmod(n-1, 26)\n chars.append(chr(97 + remainder))\n return ''.join(reversed(chars))\n\n binary_mask = np.pad(binary_mask, ((1, 1), (1, 1)), 'constant')\n mask_dt = cv2.distanceTransform(binary_mask, cv2.DIST_L2, 0)\n mask_dt = mask_dt[1:-1, 1:-1]\n max_dist = np.max(mask_dt)\n coords_y, coords_x = np.where(mask_dt == max_dist) # coords is [y, x]\n\n if label_mode == 'a':\n text = number_to_string(int(text))\n else:\n text = text\n\n self.draw_text(text, (coords_x[len(coords_x)//2] + 2, coords_y[len(coords_y)//2] - 6), color=color)\n\n # TODO sometimes drawn on wrong objects. the heuristics here can improve.\n # _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)\n # if stats[1:, -1].size == 0:\n # return\n # largest_component_id = np.argmax(stats[1:, -1]) + 1\n\n # # draw text on the largest component, as well as other very large components.\n # for cid in range(1, _num_cc):\n # if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:\n # # median is more stable than centroid\n # # center = centroids[largest_component_id]\n # center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]\n # # bottom=np.max((cc_labels == cid).nonzero(), axis=1)[::-1]\n # # center[1]=bottom[1]+2\n # self.draw_text(text, center, color=color)\n \n def _draw_text_in_mask(self, binary_mask, text, color):\n \"\"\"\n Find proper places to draw text given a binary mask.\n \"\"\"\n # TODO sometimes drawn on wrong objects. 
the heuristics here can improve.\n _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)\n if stats[1:, -1].size == 0:\n return\n largest_component_id = np.argmax(stats[1:, -1]) + 1\n\n # draw text on the largest component, as well as other very large components.\n for cid in range(1, _num_cc):\n if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:\n # median is more stable than centroid\n # center = centroids[largest_component_id]\n center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]\n bottom=np.max((cc_labels == cid).nonzero(), axis=1)[::-1]\n center[1]=bottom[1]+2\n self.draw_text(text, center, color=color)\n\n def _convert_keypoints(self, keypoints):\n if isinstance(keypoints, Keypoints):\n keypoints = keypoints.tensor\n keypoints = np.asarray(keypoints)\n return keypoints\n\n def get_output(self):\n \"\"\"\n Returns:\n output (VisImage): the image output containing the visualizations added\n to the image.\n \"\"\"\n return self.output" }, { "identifier": "request_gpt4v", "path": "gpt4v.py", "snippet": "def request_gpt4v(message, image):\n payload = prepare_inputs(message, image)\n response = requests.post(\"https://api.openai.com/v1/chat/completions\", headers=headers, json=payload)\n res = response.json()['choices'][0]['message']['content']\n return res" } ]
import io
import gradio as gr
import torch
import argparse
import numpy as np
import matplotlib.colors as mcolors
from PIL import Image
from seem.modeling.BaseModel import BaseModel as BaseModel_Seem
from seem.utils.distributed import init_distributed as init_distributed_seem
from seem.modeling import build_model as build_model_seem
from task_adapter.seem.tasks import interactive_seem_m2m_auto, inference_seem_pano, inference_seem_interactive
from semantic_sam.BaseModel import BaseModel
from semantic_sam import build_model
from semantic_sam.utils.dist import init_distributed_mode
from semantic_sam.utils.arguments import load_opt_from_config_file
from semantic_sam.utils.constants import COCO_PANOPTIC_CLASSES
from task_adapter.semantic_sam.tasks import inference_semsam_m2m_auto, prompt_switch
from segment_anything import sam_model_registry
from task_adapter.sam.tasks.inference_sam_m2m_auto import inference_sam_m2m_auto
from task_adapter.sam.tasks.inference_sam_m2m_interactive import inference_sam_m2m_interactive
from task_adapter.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
from scipy.ndimage import label
from gpt4v import request_gpt4v
from openai import OpenAI
from pydub import AudioSegment
from pydub.playback import play
18067
# Jianwei Yang ([email protected]) # Xueyan Zou ([email protected]) # Hao Zhang ([email protected]) # -------------------------------------------------------- # seem # semantic sam # sam metadata = MetadataCatalog.get('coco_2017_train_panoptic') css4_colors = mcolors.CSS4_COLORS color_proposals = [list(mcolors.hex2color(color)) for color in css4_colors.values()] client = OpenAI() ''' build args ''' semsam_cfg = "configs/semantic_sam_only_sa-1b_swinL.yaml" seem_cfg = "configs/seem_focall_unicl_lang_v1.yaml" semsam_ckpt = "./swinl_only_sam_many2many.pth" sam_ckpt = "./sam_vit_h_4b8939.pth" seem_ckpt = "./seem_focall_v1.pt" opt_semsam = load_opt_from_config_file(semsam_cfg) opt_seem = load_opt_from_config_file(seem_cfg) opt_seem = init_distributed_seem(opt_seem) ''' build model ''' model_semsam = BaseModel(opt_semsam, build_model(opt_semsam)).from_pretrained(semsam_ckpt).eval().cuda() model_sam = sam_model_registry["vit_h"](checkpoint=sam_ckpt).eval().cuda() model_seem = BaseModel_Seem(opt_seem, build_model_seem(opt_seem)).from_pretrained(seem_ckpt).eval().cuda() with torch.no_grad(): with torch.autocast(device_type='cuda', dtype=torch.float16): model_seem.model.sem_seg_head.predictor.lang_encoder.get_text_embeddings(COCO_PANOPTIC_CLASSES + ["background"], is_eval=True) history_images = [] history_masks = [] history_texts = [] @torch.no_grad() def inference(image, slider, mode, alpha, label_mode, anno_mode, *args, **kwargs): global history_images; history_images = [] global history_masks; history_masks = [] if slider < 1.5: model_name = 'seem' elif slider > 2.5: model_name = 'sam' else: if mode == 'Automatic': model_name = 'semantic-sam' if slider < 1.5 + 0.14: level = [1] elif slider < 1.5 + 0.28: level = [2] elif slider < 1.5 + 0.42: level = [3] elif slider < 1.5 + 0.56: level = [4] elif slider < 1.5 + 0.70: level = [5] elif slider < 1.5 + 0.84: level = [6] else: level = [6, 1, 2, 3, 4, 5] else: model_name = 'sam' if label_mode == 'Alphabet': label_mode = 'a' else: label_mode = '1' text_size, hole_scale, island_scale=640,100,100 text, text_part, text_thresh = '','','0.0' with torch.autocast(device_type='cuda', dtype=torch.float16): semantic=False if mode == "Interactive": labeled_array, num_features = label(np.asarray(image['mask'].convert('L'))) spatial_masks = torch.stack([torch.from_numpy(labeled_array == i+1) for i in range(num_features)]) if model_name == 'semantic-sam': model = model_semsam output, mask = inference_semsam_m2m_auto(model, image['image'], level, text, text_part, text_thresh, text_size, hole_scale, island_scale, semantic, label_mode=label_mode, alpha=alpha, anno_mode=anno_mode, *args, **kwargs) elif model_name == 'sam': model = model_sam if mode == "Automatic": output, mask = inference_sam_m2m_auto(model, image['image'], text_size, label_mode, alpha, anno_mode) elif mode == "Interactive": output, mask = inference_sam_m2m_interactive(model, image['image'], spatial_masks, text_size, label_mode, alpha, anno_mode) elif model_name == 'seem': model = model_seem if mode == "Automatic": output, mask = inference_seem_pano(model, image['image'], text_size, label_mode, alpha, anno_mode) elif mode == "Interactive": output, mask = inference_seem_interactive(model, image['image'], spatial_masks, text_size, label_mode, alpha, anno_mode) # convert output to PIL image history_masks.append(mask) history_images.append(Image.fromarray(output)) return (output, []) def gpt4v_response(message, history): global history_images global history_texts; history_texts = [] try:
# -------------------------------------------------------- # Set-of-Mark (SoM) Prompting for Visual Grounding in GPT-4V # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by: # Jianwei Yang ([email protected]) # Xueyan Zou ([email protected]) # Hao Zhang ([email protected]) # -------------------------------------------------------- # seem # semantic sam # sam metadata = MetadataCatalog.get('coco_2017_train_panoptic') css4_colors = mcolors.CSS4_COLORS color_proposals = [list(mcolors.hex2color(color)) for color in css4_colors.values()] client = OpenAI() ''' build args ''' semsam_cfg = "configs/semantic_sam_only_sa-1b_swinL.yaml" seem_cfg = "configs/seem_focall_unicl_lang_v1.yaml" semsam_ckpt = "./swinl_only_sam_many2many.pth" sam_ckpt = "./sam_vit_h_4b8939.pth" seem_ckpt = "./seem_focall_v1.pt" opt_semsam = load_opt_from_config_file(semsam_cfg) opt_seem = load_opt_from_config_file(seem_cfg) opt_seem = init_distributed_seem(opt_seem) ''' build model ''' model_semsam = BaseModel(opt_semsam, build_model(opt_semsam)).from_pretrained(semsam_ckpt).eval().cuda() model_sam = sam_model_registry["vit_h"](checkpoint=sam_ckpt).eval().cuda() model_seem = BaseModel_Seem(opt_seem, build_model_seem(opt_seem)).from_pretrained(seem_ckpt).eval().cuda() with torch.no_grad(): with torch.autocast(device_type='cuda', dtype=torch.float16): model_seem.model.sem_seg_head.predictor.lang_encoder.get_text_embeddings(COCO_PANOPTIC_CLASSES + ["background"], is_eval=True) history_images = [] history_masks = [] history_texts = [] @torch.no_grad() def inference(image, slider, mode, alpha, label_mode, anno_mode, *args, **kwargs): global history_images; history_images = [] global history_masks; history_masks = [] if slider < 1.5: model_name = 'seem' elif slider > 2.5: model_name = 'sam' else: if mode == 'Automatic': model_name = 'semantic-sam' if slider < 1.5 + 0.14: level = [1] elif slider < 1.5 + 0.28: level = [2] elif slider < 1.5 + 0.42: level = [3] elif slider < 1.5 + 0.56: level = [4] elif slider < 1.5 + 0.70: level = [5] elif slider < 1.5 + 0.84: level = [6] else: level = [6, 1, 2, 3, 4, 5] else: model_name = 'sam' if label_mode == 'Alphabet': label_mode = 'a' else: label_mode = '1' text_size, hole_scale, island_scale=640,100,100 text, text_part, text_thresh = '','','0.0' with torch.autocast(device_type='cuda', dtype=torch.float16): semantic=False if mode == "Interactive": labeled_array, num_features = label(np.asarray(image['mask'].convert('L'))) spatial_masks = torch.stack([torch.from_numpy(labeled_array == i+1) for i in range(num_features)]) if model_name == 'semantic-sam': model = model_semsam output, mask = inference_semsam_m2m_auto(model, image['image'], level, text, text_part, text_thresh, text_size, hole_scale, island_scale, semantic, label_mode=label_mode, alpha=alpha, anno_mode=anno_mode, *args, **kwargs) elif model_name == 'sam': model = model_sam if mode == "Automatic": output, mask = inference_sam_m2m_auto(model, image['image'], text_size, label_mode, alpha, anno_mode) elif mode == "Interactive": output, mask = inference_sam_m2m_interactive(model, image['image'], spatial_masks, text_size, label_mode, alpha, anno_mode) elif model_name == 'seem': model = model_seem if mode == "Automatic": output, mask = inference_seem_pano(model, image['image'], text_size, label_mode, alpha, anno_mode) elif mode == "Interactive": output, mask = inference_seem_interactive(model, image['image'], spatial_masks, text_size, label_mode, alpha, anno_mode) # convert output to PIL image 
history_masks.append(mask) history_images.append(Image.fromarray(output)) return (output, []) def gpt4v_response(message, history): global history_images global history_texts; history_texts = [] try:
res = request_gpt4v(message, history_images[0])
8
2023-10-16 03:39:26+00:00
24k
hkchengrex/Cutie
gui/main_controller.py
[ { "identifier": "CUTIE", "path": "cutie/model/cutie.py", "snippet": "class CUTIE(nn.Module):\n def __init__(self, cfg: DictConfig, *, single_object=False):\n super().__init__()\n model_cfg = cfg.model\n self.ms_dims = model_cfg.pixel_encoder.ms_dims\n self.key_dim = model_cfg.key_dim\n self.value_dim = model_cfg.value_dim\n self.sensory_dim = model_cfg.sensory_dim\n self.pixel_dim = model_cfg.pixel_dim\n self.embed_dim = model_cfg.embed_dim\n self.single_object = single_object\n\n log.info(f'Single object: {self.single_object}')\n\n self.pixel_encoder = PixelEncoder(model_cfg)\n self.pix_feat_proj = nn.Conv2d(self.ms_dims[0], self.pixel_dim, kernel_size=1)\n self.key_proj = KeyProjection(model_cfg)\n self.mask_encoder = MaskEncoder(model_cfg, single_object=single_object)\n self.mask_decoder = MaskDecoder(model_cfg)\n self.pixel_fuser = PixelFeatureFuser(model_cfg, single_object=single_object)\n self.object_transformer = QueryTransformer(model_cfg)\n self.object_summarizer = ObjectSummarizer(model_cfg)\n self.aux_computer = AuxComputer(cfg)\n\n self.register_buffer(\"pixel_mean\", torch.Tensor(model_cfg.pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(model_cfg.pixel_std).view(-1, 1, 1), False)\n\n def _get_others(self, masks: torch.Tensor) -> torch.Tensor:\n # for each object, return the sum of masks of all other objects\n if self.single_object:\n return None\n\n num_objects = masks.shape[1]\n if num_objects >= 1:\n others = (masks.sum(dim=1, keepdim=True) - masks).clamp(0, 1)\n else:\n others = torch.zeros_like(masks)\n return others\n\n def encode_image(self, image: torch.Tensor) -> (Iterable[torch.Tensor], torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n ms_image_feat = self.pixel_encoder(image)\n return ms_image_feat, self.pix_feat_proj(ms_image_feat[0])\n\n def encode_mask(\n self,\n image: torch.Tensor,\n ms_features: List[torch.Tensor],\n sensory: torch.Tensor,\n masks: torch.Tensor,\n *,\n deep_update: bool = True,\n chunk_size: int = -1,\n need_weights: bool = False) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n others = self._get_others(masks)\n mask_value, new_sensory = self.mask_encoder(image,\n ms_features,\n sensory,\n masks,\n others,\n deep_update=deep_update,\n chunk_size=chunk_size)\n object_summaries, object_logits = self.object_summarizer(masks, mask_value, need_weights)\n return mask_value, new_sensory, object_summaries, object_logits\n\n def transform_key(self,\n final_pix_feat: torch.Tensor,\n *,\n need_sk: bool = True,\n need_ek: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n key, shrinkage, selection = self.key_proj(final_pix_feat, need_s=need_sk, need_e=need_ek)\n return key, shrinkage, selection\n\n # Used in training only.\n # This step is replaced by MemoryManager in test time\n def read_memory(self, query_key: torch.Tensor, query_selection: torch.Tensor,\n memory_key: torch.Tensor, memory_shrinkage: torch.Tensor,\n msk_value: torch.Tensor, obj_memory: torch.Tensor, pix_feat: torch.Tensor,\n sensory: torch.Tensor, last_mask: torch.Tensor,\n selector: torch.Tensor) -> (torch.Tensor, Dict[str, torch.Tensor]):\n \"\"\"\n query_key : B * CK * H * W\n query_selection : B * CK * H * W\n memory_key : B * CK * T * H * W\n memory_shrinkage: B * 1 * T * H * W\n msk_value : B * num_objects * CV * T * H * W\n obj_memory : B * num_objects * T * num_summaries * C\n pixel_feature : B * C * H * W\n \"\"\"\n batch_size, num_objects = 
msk_value.shape[:2]\n\n # read using visual attention\n with torch.cuda.amp.autocast(enabled=False):\n affinity = get_affinity(memory_key.float(), memory_shrinkage.float(), query_key.float(),\n query_selection.float())\n\n msk_value = msk_value.flatten(start_dim=1, end_dim=2).float()\n\n # B * (num_objects*CV) * H * W\n pixel_readout = readout(affinity, msk_value)\n pixel_readout = pixel_readout.view(batch_size, num_objects, self.value_dim,\n *pixel_readout.shape[-2:])\n pixel_readout = self.pixel_fusion(pix_feat, pixel_readout, sensory, last_mask)\n\n # read from query transformer\n mem_readout, aux_features = self.readout_query(pixel_readout, obj_memory, selector=selector)\n\n aux_output = {\n 'sensory': sensory,\n 'q_logits': aux_features['logits'] if aux_features else None,\n 'attn_mask': aux_features['attn_mask'] if aux_features else None,\n }\n\n return mem_readout, aux_output\n\n def pixel_fusion(self,\n pix_feat: torch.Tensor,\n pixel: torch.Tensor,\n sensory: torch.Tensor,\n last_mask: torch.Tensor,\n *,\n chunk_size: int = -1) -> torch.Tensor:\n last_mask = F.interpolate(last_mask, size=sensory.shape[-2:], mode='area')\n last_others = self._get_others(last_mask)\n fused = self.pixel_fuser(pix_feat,\n pixel,\n sensory,\n last_mask,\n last_others,\n chunk_size=chunk_size)\n return fused\n\n def readout_query(self,\n pixel_readout,\n obj_memory,\n *,\n selector=None,\n need_weights=False) -> (torch.Tensor, Dict[str, torch.Tensor]):\n return self.object_transformer(pixel_readout,\n obj_memory,\n selector=selector,\n need_weights=need_weights)\n\n def segment(self,\n ms_image_feat: List[torch.Tensor],\n memory_readout: torch.Tensor,\n sensory: torch.Tensor,\n *,\n selector: bool = None,\n chunk_size: int = -1,\n update_sensory: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n \"\"\"\n multi_scale_features is from the key encoder for skip-connection\n memory_readout is from working/long-term memory\n sensory is the sensory memory\n last_mask is the mask from the last frame, supplementing sensory memory\n selector is 1 if an object exists, and 0 otherwise. 
We use it to filter padded objects\n during training.\n \"\"\"\n sensory, logits = self.mask_decoder(ms_image_feat,\n memory_readout,\n sensory,\n chunk_size=chunk_size,\n update_sensory=update_sensory)\n\n prob = torch.sigmoid(logits)\n if selector is not None:\n prob = prob * selector\n\n # Softmax over all objects[]\n logits = aggregate(prob, dim=1)\n logits = F.interpolate(logits, scale_factor=4, mode='bilinear', align_corners=False)\n prob = F.softmax(logits, dim=1)\n\n return sensory, logits, prob\n\n def compute_aux(self, pix_feat: torch.Tensor, aux_inputs: Dict[str, torch.Tensor],\n selector: torch.Tensor) -> Dict[str, torch.Tensor]:\n return self.aux_computer(pix_feat, aux_inputs, selector)\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n def load_weights(self, src_dict, init_as_zero_if_needed=False) -> None:\n if not self.single_object:\n # Map single-object weight to multi-object weight (4->5 out channels in conv1)\n for k in list(src_dict.keys()):\n if k == 'mask_encoder.conv1.weight':\n if src_dict[k].shape[1] == 4:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((64, 1, 7, 7), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif k == 'pixel_fuser.sensory_compress.weight':\n if src_dict[k].shape[1] == self.sensory_dim + 1:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((self.value_dim, 1, 1, 1), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif self.single_object:\n \"\"\"\n If the model is multiple-object and we are training in single-object, \n we strip the last channel of conv1.\n This is not supposed to happen in standard training except when users are trying to\n finetune a trained model with single object datasets.\n \"\"\"\n if src_dict['mask_encoder.conv1.weight'].shape[1] == 5:\n log.warning(f'Converting {k} from multiple objects to single object.'\n 'This is not supposed to happen in standard training.')\n src_dict[k] = src_dict[k][:, :-1]\n\n for k in src_dict:\n if k not in self.state_dict():\n log.info(f'Key {k} found in src_dict but not in self.state_dict()!!!')\n for k in self.state_dict():\n if k not in src_dict:\n log.info(f'Key {k} found in self.state_dict() but not in src_dict!!!')\n\n self.load_state_dict(src_dict, strict=False)\n\n @property\n def device(self) -> torch.device:\n return self.pixel_mean.device" }, { "identifier": "InferenceCore", "path": "cutie/inference/inference_core.py", "snippet": "class InferenceCore:\n def __init__(self,\n network: CUTIE,\n cfg: DictConfig,\n *,\n image_feature_store: ImageFeatureStore = None):\n self.network = network\n self.cfg = cfg\n self.mem_every = cfg.mem_every\n stagger_updates = cfg.stagger_updates\n self.chunk_size = cfg.chunk_size\n self.save_aux = cfg.save_aux\n self.max_internal_size = cfg.max_internal_size\n self.flip_aug = cfg.flip_aug\n\n self.curr_ti = -1\n self.last_mem_ti = 0\n # at which time indices should we update the sensory memory\n if stagger_updates >= self.mem_every:\n self.stagger_ti = set(range(1, self.mem_every + 1))\n else:\n self.stagger_ti = set(\n np.round(np.linspace(1, 
self.mem_every, stagger_updates)).astype(int))\n self.object_manager = ObjectManager()\n self.memory = MemoryManager(cfg=cfg, object_manager=self.object_manager)\n\n if image_feature_store is None:\n self.image_feature_store = ImageFeatureStore(self.network)\n else:\n self.image_feature_store = image_feature_store\n\n self.last_mask = None\n\n def clear_memory(self):\n self.curr_ti = -1\n self.last_mem_ti = 0\n self.memory = MemoryManager(cfg=self.cfg, object_manager=self.object_manager)\n\n def clear_non_permanent_memory(self):\n self.curr_ti = -1\n self.last_mem_ti = 0\n self.memory.clear_non_permanent_memory()\n\n def clear_sensory_memory(self):\n self.curr_ti = -1\n self.last_mem_ti = 0\n self.memory.clear_sensory_memory()\n\n def update_config(self, cfg):\n self.mem_every = cfg['mem_every']\n self.memory.update_config(cfg)\n\n def _add_memory(self,\n image: torch.Tensor,\n pix_feat: torch.Tensor,\n prob: torch.Tensor,\n key: torch.Tensor,\n shrinkage: torch.Tensor,\n selection: torch.Tensor,\n *,\n is_deep_update: bool = True,\n force_permanent: bool = False) -> None:\n \"\"\"\n Memorize the given segmentation in all memory stores.\n\n The batch dimension is 1 if flip augmentation is not used.\n image: RGB image, (1/2)*3*H*W\n pix_feat: from the key encoder, (1/2)*_*H*W\n prob: (1/2)*num_objects*H*W, in [0, 1]\n key/shrinkage/selection: for anisotropic l2, (1/2)*_*H*W\n selection can be None if not using long-term memory\n is_deep_update: whether to use deep update (e.g. with the mask encoder)\n force_permanent: whether to force the memory to be permanent\n \"\"\"\n if prob.shape[1] == 0:\n # nothing to add\n log.warn('Trying to add an empty object mask to memory!')\n return\n\n if force_permanent:\n as_permanent = 'all'\n else:\n as_permanent = 'first'\n\n self.memory.initialize_sensory_if_needed(key, self.object_manager.all_obj_ids)\n msk_value, sensory, obj_value, self.obj_logits = self.network.encode_mask(\n image,\n pix_feat,\n self.memory.get_sensory(self.object_manager.all_obj_ids),\n prob,\n deep_update=is_deep_update,\n chunk_size=self.chunk_size,\n need_weights=self.save_aux)\n self.memory.add_memory(key,\n shrinkage,\n msk_value,\n obj_value,\n self.object_manager.all_obj_ids,\n selection=selection,\n as_permanent=as_permanent)\n self.last_mem_ti = self.curr_ti\n if is_deep_update:\n self.memory.update_sensory(sensory, self.object_manager.all_obj_ids)\n\n def _segment(self,\n key: torch.Tensor,\n selection: torch.Tensor,\n pix_feat: torch.Tensor,\n ms_features: Iterable[torch.Tensor],\n update_sensory: bool = True) -> torch.Tensor:\n \"\"\"\n Produce a segmentation using the given features and the memory\n\n The batch dimension is 1 if flip augmentation is not used.\n key/selection: for anisotropic l2: (1/2) * _ * H * W\n pix_feat: from the key encoder, (1/2) * _ * H * W\n ms_features: an iterable of multiscale features from the encoder, each is (1/2)*_*H*W\n with strides 16, 8, and 4 respectively\n update_sensory: whether to update the sensory memory\n\n Returns: (num_objects+1)*H*W normalized probability; the first channel is the background\n \"\"\"\n bs = key.shape[0]\n if self.flip_aug:\n assert bs == 2\n else:\n assert bs == 1\n\n if not self.memory.engaged:\n log.warn('Trying to segment without any memory!')\n return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16),\n device=key.device,\n dtype=key.dtype)\n\n memory_readout = self.memory.read(pix_feat, key, selection, self.last_mask, self.network)\n memory_readout = 
self.object_manager.realize_dict(memory_readout)\n sensory, _, pred_prob_with_bg = self.network.segment(ms_features,\n memory_readout,\n self.memory.get_sensory(\n self.object_manager.all_obj_ids),\n chunk_size=self.chunk_size,\n update_sensory=update_sensory)\n # remove batch dim\n if self.flip_aug:\n # average predictions of the non-flipped and flipped version\n pred_prob_with_bg = (pred_prob_with_bg[0] +\n torch.flip(pred_prob_with_bg[1], dims=[-1])) / 2\n else:\n pred_prob_with_bg = pred_prob_with_bg[0]\n if update_sensory:\n self.memory.update_sensory(sensory, self.object_manager.all_obj_ids)\n return pred_prob_with_bg\n\n def step(self,\n image: torch.Tensor,\n mask: Optional[torch.Tensor] = None,\n objects: Optional[List[int]] = None,\n *,\n idx_mask: bool = True,\n end: bool = False,\n delete_buffer: bool = True,\n force_permanent: bool = False) -> torch.Tensor:\n \"\"\"\n Take a step with a new incoming image.\n If there is an incoming mask with new objects, we will memorize them.\n If there is no incoming mask, we will segment the image using the memory.\n In both cases, we will update the memory and return a segmentation.\n\n image: 3*H*W\n mask: H*W (if idx mask) or len(objects)*H*W or None\n objects: list of object ids that are valid in the mask Tensor.\n The ids themselves do not need to be consecutive/in order, but they need to be \n in the same position in the list as the corresponding mask\n in the tensor in non-idx-mask mode.\n objects is ignored if the mask is None. \n If idx_mask is False and objects is None, we sequentially infer the object ids.\n idx_mask: if True, mask is expected to contain an object id at every pixel.\n If False, mask should have multiple channels with each channel representing one object.\n end: if we are at the end of the sequence, we do not need to update memory\n if unsure just set it to False \n delete_buffer: whether to delete the image feature buffer after this step\n force_permanent: the memory recorded this frame will be added to the permanent memory\n \"\"\"\n if objects is None and mask is not None:\n assert not idx_mask\n objects = list(range(1, mask.shape[0] + 1))\n\n # resize input if needed -- currently only used for the GUI\n resize_needed = False\n if self.max_internal_size > 0:\n h, w = image.shape[-2:]\n min_side = min(h, w)\n if min_side > self.max_internal_size:\n resize_needed = True\n new_h = int(h / min_side * self.max_internal_size)\n new_w = int(w / min_side * self.max_internal_size)\n image = F.interpolate(image.unsqueeze(0),\n size=(new_h, new_w),\n mode='bilinear',\n align_corners=False)[0]\n if mask is not None:\n if idx_mask:\n mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(),\n size=(new_h, new_w),\n mode='nearest',\n align_corners=False)[0, 0].round().long()\n else:\n mask = F.interpolate(mask.unsqueeze(0),\n size=(new_h, new_w),\n mode='bilinear',\n align_corners=False)[0]\n\n self.curr_ti += 1\n\n image, self.pad = pad_divide_by(image, 16)\n image = image.unsqueeze(0) # add the batch dimension\n if self.flip_aug:\n image = torch.cat([image, torch.flip(image, dims=[-1])], dim=0)\n\n # whether to update the working memory\n is_mem_frame = ((self.curr_ti - self.last_mem_ti >= self.mem_every) or\n (mask is not None)) and (not end)\n # segment when there is no input mask or when the input mask is incomplete\n need_segment = (mask is None) or (self.object_manager.num_obj > 0\n and not self.object_manager.has_all(objects))\n update_sensory = ((self.curr_ti - self.last_mem_ti) in self.stagger_ti) and (not 
end)\n\n # encoding the image\n ms_feat, pix_feat = self.image_feature_store.get_features(self.curr_ti, image)\n key, shrinkage, selection = self.image_feature_store.get_key(self.curr_ti, image)\n\n # segmentation from memory if needed\n if need_segment:\n pred_prob_with_bg = self._segment(key,\n selection,\n pix_feat,\n ms_feat,\n update_sensory=update_sensory)\n\n # use the input mask if provided\n if mask is not None:\n # inform the manager of the new objects, and get a list of temporary id\n # temporary ids -- indicates the position of objects in the tensor\n # (starts with 1 due to the background channel)\n corresponding_tmp_ids, _ = self.object_manager.add_new_objects(objects)\n\n mask, _ = pad_divide_by(mask, 16)\n if need_segment:\n # merge predicted mask with the incomplete input mask\n pred_prob_no_bg = pred_prob_with_bg[1:]\n # use the mutual exclusivity of segmentation\n if idx_mask:\n pred_prob_no_bg[:, mask > 0] = 0\n else:\n pred_prob_no_bg[:, mask.max(0) > 0.5] = 0\n\n new_masks = []\n for mask_id, tmp_id in enumerate(corresponding_tmp_ids):\n if idx_mask:\n this_mask = (mask == objects[mask_id]).type_as(pred_prob_no_bg)\n else:\n this_mask = mask[tmp_id]\n if tmp_id > pred_prob_no_bg.shape[0]:\n new_masks.append(this_mask.unsqueeze(0))\n else:\n # +1 for padding the background channel\n pred_prob_no_bg[tmp_id - 1] = this_mask\n # new_masks are always in the order of tmp_id\n mask = torch.cat([pred_prob_no_bg, *new_masks], dim=0)\n elif idx_mask:\n # simply convert cls to one-hot representation\n if len(objects) == 0:\n if delete_buffer:\n self.image_feature_store.delete(self.curr_ti)\n log.warn('Trying to insert an empty mask as memory!')\n return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16),\n device=key.device,\n dtype=key.dtype)\n mask = torch.stack(\n [mask == objects[mask_id] for mask_id, _ in enumerate(corresponding_tmp_ids)],\n dim=0)\n pred_prob_with_bg = aggregate(mask, dim=0)\n pred_prob_with_bg = torch.softmax(pred_prob_with_bg, dim=0)\n\n self.last_mask = pred_prob_with_bg[1:].unsqueeze(0)\n if self.flip_aug:\n self.last_mask = torch.cat(\n [self.last_mask, torch.flip(self.last_mask, dims=[-1])], dim=0)\n\n # save as memory if needed\n if is_mem_frame or force_permanent:\n self._add_memory(image,\n pix_feat,\n self.last_mask,\n key,\n shrinkage,\n selection,\n force_permanent=force_permanent)\n\n if delete_buffer:\n self.image_feature_store.delete(self.curr_ti)\n\n output_prob = unpad(pred_prob_with_bg, self.pad)\n if resize_needed:\n # restore output to the original size\n output_prob = F.interpolate(output_prob.unsqueeze(0),\n size=(h, w),\n mode='bilinear',\n align_corners=False)[0]\n\n return output_prob\n\n def get_aux_outputs(self, image: torch.Tensor) -> Dict[str, torch.Tensor]:\n image, pads = pad_divide_by(image, 16)\n image = image.unsqueeze(0) # add the batch dimension\n _, pix_feat = self.image_feature_store.get_features(self.curr_ti, image)\n\n aux_inputs = self.memory.aux\n aux_outputs = self.network.compute_aux(pix_feat, aux_inputs, selector=None)\n aux_outputs['q_weights'] = aux_inputs['q_weights']\n aux_outputs['p_weights'] = aux_inputs['p_weights']\n\n for k, v in aux_outputs.items():\n if len(v.shape) == 5:\n aux_outputs[k] = F.interpolate(v[0],\n size=image.shape[-2:],\n mode='bilinear',\n align_corners=False)\n elif 'weights' in k:\n b, num_objects, num_heads, num_queries, h, w = v.shape\n v = v.view(num_objects * num_heads, num_queries, h, w)\n v = F.interpolate(v, size=image.shape[-2:], mode='bilinear', align_corners=False)\n 
aux_outputs[k] = v.view(num_objects, num_heads, num_queries, *image.shape[-2:])\n else:\n aux_outputs[k] = F.interpolate(v,\n size=image.shape[-2:],\n mode='bilinear',\n align_corners=False)[0]\n aux_outputs[k] = unpad(aux_outputs[k], pads)\n if 'weights' in k:\n weights = aux_outputs[k]\n weights = weights / (weights.max(-1, keepdim=True)[0].max(-2, keepdim=True)[0] +\n 1e-8)\n aux_outputs[k] = (weights * 255).cpu().numpy()\n else:\n aux_outputs[k] = (aux_outputs[k].softmax(dim=0) * 255).cpu().numpy()\n\n self.image_feature_store.delete(self.curr_ti)\n return aux_outputs\n\n def get_aux_object_weights(self, image: torch.Tensor) -> np.ndarray:\n image, pads = pad_divide_by(image, 16)\n # B*num_objects*H*W*num_queries -> num_objects*num_queries*H*W\n # weights = F.softmax(self.obj_logits, dim=-1)[0]\n weights = F.sigmoid(self.obj_logits)[0]\n weights = weights.permute(0, 3, 1, 2).contiguous()\n weights = F.interpolate(weights,\n size=image.shape[-2:],\n mode='bilinear',\n align_corners=False)\n # weights = weights / (weights.max(-1, keepdim=True)[0].max(-2, keepdim=True)[0])\n weights = unpad(weights, pads)\n weights = (weights * 255).cpu().numpy()\n return weights" }, { "identifier": "ResourceManager", "path": "gui/resource_manager.py", "snippet": "class ResourceManager:\n def __init__(self, cfg: DictConfig):\n # determine inputs\n images = cfg['images']\n video = cfg['video']\n self.workspace = cfg['workspace']\n self.max_size = cfg['max_overall_size']\n self.palette = davis_palette\n\n # create temporary workspace if not specified\n if self.workspace is None:\n if images is not None:\n basename = path.basename(images)\n elif video is not None:\n basename = path.basename(video)[:-4]\n else:\n raise NotImplementedError('Either images, video, or workspace has to be specified')\n\n self.workspace = path.join('./workspace', basename)\n\n print(f'Workspace is in: {self.workspace}')\n with open_dict(cfg):\n cfg['workspace'] = self.workspace\n\n # determine the location of input images\n need_decoding = False\n need_resizing = False\n if path.exists(path.join(self.workspace, 'images')):\n pass\n elif images is not None:\n need_resizing = True\n elif video is not None:\n # will decode video into frames later\n need_decoding = True\n\n # create workspace subdirectories\n self.image_dir = path.join(self.workspace, 'images')\n self.mask_dir = path.join(self.workspace, 'masks')\n self.visualization_dir = path.join(self.workspace, 'visualization')\n self.soft_mask_dir = path.join(self.workspace, 'soft_masks')\n os.makedirs(self.image_dir, exist_ok=True)\n os.makedirs(self.mask_dir, exist_ok=True)\n os.makedirs(self.visualization_dir, exist_ok=True)\n os.makedirs(self.soft_mask_dir, exist_ok=True)\n\n # create all soft mask sub-directories\n for i in range(1, cfg['num_objects'] + 1):\n os.makedirs(path.join(self.soft_mask_dir, f'{i}'), exist_ok=True)\n\n # convert read functions to be buffered\n self.get_image = LRU(self._get_image_unbuffered, maxsize=cfg['buffer_size'])\n self.get_mask = LRU(self._get_mask_unbuffered, maxsize=cfg['buffer_size'])\n\n # extract frames from video\n if need_decoding:\n self._extract_frames(video)\n\n # copy/resize existing images to the workspace\n if need_resizing:\n self._copy_resize_frames(images)\n\n # read all frame names\n self.names = sorted(os.listdir(self.image_dir))\n self.names = [f[:-4] for f in self.names] # remove extensions\n self.length = len(self.names)\n\n assert self.length > 0, f'No images found! Check {self.workspace}/images. 
Remove folder if necessary.'\n\n print(f'{self.length} images found.')\n\n self.height, self.width = self.get_image(0).shape[:2]\n\n # create the saver threads for saving the masks/visualizations\n self.save_queue = Queue(maxsize=cfg['save_queue_size'])\n self.num_save_threads = cfg['num_save_threads']\n self.save_threads = [\n Thread(target=self.save_thread, args=(self.save_queue, ))\n for _ in range(self.num_save_threads)\n ]\n for t in self.save_threads:\n t.daemon = True\n t.start()\n\n def __del__(self):\n for _ in range(self.num_save_threads):\n self.save_queue.put(None)\n self.save_queue.join()\n for t in self.save_threads:\n t.join()\n\n def save_thread(self, queue: Queue):\n while True:\n args: SaveItem = queue.get()\n if args is None:\n queue.task_done()\n break\n if args.type == 'mask':\n # PIL image\n args.data.save(path.join(self.mask_dir, args.name + '.png'))\n elif args.type.startswith('visualization'):\n # numpy array, save with cv2\n vis_mode = args.type.split('_')[-1]\n data = cv2.cvtColor(args.data, cv2.COLOR_RGB2BGR)\n os.makedirs(path.join(self.visualization_dir, vis_mode), exist_ok=True)\n cv2.imwrite(path.join(self.visualization_dir, vis_mode, args.name + '.jpg'), data)\n elif args.type == 'soft_mask':\n # numpy array, save each channel with cv2\n num_channels = args.data.shape[0]\n # first channel is background -- ignore\n for i in range(1, num_channels):\n data = args.data[i]\n data = (data * 255).astype(np.uint8)\n cv2.imwrite(path.join(self.soft_mask_dir, f'{i}', args.name + '.png'), data)\n else:\n raise NotImplementedError\n queue.task_done()\n\n def _extract_frames(self, video: str):\n cap = cv2.VideoCapture(video)\n frame_index = 0\n print(f'Extracting frames from {video} into {self.image_dir}...')\n with tqdm() as bar:\n while (cap.isOpened()):\n _, frame = cap.read()\n if frame is None:\n break\n h, w = frame.shape[:2]\n if self.max_size > 0 and min(h, w) > self.max_size:\n new_w = (w * self.max_size // min(w, h))\n new_h = (h * self.max_size // min(w, h))\n frame = cv2.resize(frame, dsize=(new_w, new_h), interpolation=cv2.INTER_AREA)\n cv2.imwrite(path.join(self.image_dir, f'{frame_index:07d}.jpg'), frame)\n frame_index += 1\n bar.update()\n print('Done!')\n\n def _copy_resize_frames(self, images: str):\n image_list = os.listdir(images)\n print(f'Copying/resizing frames into {self.image_dir}...')\n for image_name in tqdm(image_list):\n if self.max_size < 0:\n # just copy\n shutil.copy2(path.join(images, image_name), self.image_dir)\n else:\n frame = cv2.imread(path.join(images, image_name))\n h, w = frame.shape[:2]\n if self.max_size > 0 and min(h, w) > self.max_size:\n new_w = (w * self.max_size // min(w, h))\n new_h = (h * self.max_size // min(w, h))\n frame = cv2.resize(frame, dsize=(new_w, new_h), interpolation=cv2.INTER_AREA)\n cv2.imwrite(path.join(self.image_dir, image_name), frame)\n print('Done!')\n\n def add_to_queue_with_warning(self, item: SaveItem):\n if self.save_queue.full():\n print(\n 'The save queue is full! You need more threads or faster IO. 
Program might pause.')\n self.save_queue.put(item)\n\n def save_mask(self, ti: int, mask: np.ndarray):\n # mask should be uint8 H*W without channels\n assert 0 <= ti < self.length\n assert isinstance(mask, np.ndarray)\n\n mask = Image.fromarray(mask)\n mask.putpalette(self.palette)\n self.invalidate(ti)\n self.add_to_queue_with_warning(SaveItem('mask', mask, self.names[ti]))\n\n def save_visualization(self, ti: int, vis_mode: str, image: np.ndarray):\n # image should be uint8 3*H*W\n assert 0 <= ti < self.length\n assert isinstance(image, np.ndarray)\n\n self.add_to_queue_with_warning(SaveItem(f'visualization_{vis_mode}', image, self.names[ti]))\n\n def save_soft_mask(self, ti: int, prob: np.ndarray):\n # mask should be float (num_objects+1)*H*W np array\n assert 0 <= ti < self.length\n assert isinstance(prob, np.ndarray)\n\n self.add_to_queue_with_warning(SaveItem('soft_mask', prob, self.names[ti]))\n\n def _get_image_unbuffered(self, ti: int):\n # returns H*W*3 uint8 array\n assert 0 <= ti < self.length\n\n image = Image.open(path.join(self.image_dir, self.names[ti] + '.jpg')).convert('RGB')\n image = np.array(image)\n return image\n\n def _get_mask_unbuffered(self, ti: int):\n # returns H*W uint8 array\n assert 0 <= ti < self.length\n\n mask_path = path.join(self.mask_dir, self.names[ti] + '.png')\n if path.exists(mask_path):\n mask = Image.open(mask_path)\n mask = np.array(mask)\n return mask\n else:\n return None\n\n def import_mask(self, file_name: str, size: Optional[Tuple[int, int]] = None):\n # read an mask file and resize it to exactly match the canvas size\n image = Image.open(file_name)\n if size is not None:\n # PIL uses (width, height)\n image = image.resize((size[1], size[0]), resample=Image.Resampling.NEAREST)\n image = np.array(image)\n return image\n\n def import_layer(self, file_name: str, size: Tuple[int, int]):\n # read a RGBA/RGB file and resize it such that the entire layer is visible in the canvas\n # and then pad it to the canvas size (h, w)\n image = Image.open(file_name).convert('RGBA')\n im_w, im_h = image.size\n im_ratio = im_w / im_h\n canvas_ratio = size[1] / size[0]\n if im_ratio < canvas_ratio:\n # fit height\n new_h = size[0]\n new_w = int(new_h * im_ratio)\n else:\n # fit width\n new_w = size[1]\n new_h = int(new_w / im_ratio)\n image = image.resize((new_w, new_h), resample=Image.Resampling.BILINEAR)\n image = np.array(image)\n # padding\n pad_h = (size[0] - new_h) // 2\n pad_w = (size[1] - new_w) // 2\n image = np.pad(image,\n ((pad_h, size[0] - new_h - pad_h), (pad_w, size[1] - new_w - pad_w), (0, 0)),\n mode='constant',\n constant_values=0)\n\n return image\n\n def invalidate(self, ti: int):\n # the image buffer is never invalidated\n self.get_mask.invalidate((ti, ))\n\n def __len__(self):\n return self.length\n\n @property\n def T(self) -> int:\n return self.length\n\n @property\n def h(self) -> int:\n return self.height\n\n @property\n def w(self) -> int:\n return self.width" }, { "identifier": "GUI", "path": "gui/gui.py", "snippet": "class GUI(QWidget):\n def __init__(self, controller, cfg: DictConfig) -> None:\n super().__init__()\n\n # callbacks to be set by the controller\n self.on_mouse_motion_xy = None\n self.click_fn = None\n\n self.controller = controller\n self.cfg = cfg\n self.h = controller.h\n self.w = controller.w\n self.T = controller.T\n\n # set up the window\n self.setWindowTitle(f'Cutie demo: {cfg[\"workspace\"]}')\n self.setGeometry(100, 100, self.w + 200, self.h + 200)\n self.setWindowIcon(QIcon('docs/icon.png'))\n\n # set up some 
buttons\n self.play_button = QPushButton('Play video')\n self.play_button.clicked.connect(self.on_play_video)\n self.commit_button = QPushButton('Commit to permanent memory')\n self.commit_button.clicked.connect(controller.on_commit)\n self.export_video_button = QPushButton('Export as video')\n self.export_video_button.clicked.connect(controller.on_export_visualization)\n self.export_binary_button = QPushButton('Export binary masks')\n self.export_binary_button.clicked.connect(controller.on_export_binary)\n\n self.forward_run_button = QPushButton('Propagate forward')\n self.forward_run_button.clicked.connect(controller.on_forward_propagation)\n self.forward_run_button.setMinimumWidth(150)\n\n self.backward_run_button = QPushButton('Propagate backward')\n self.backward_run_button.clicked.connect(controller.on_backward_propagation)\n self.backward_run_button.setMinimumWidth(150)\n\n # universal progressbar\n self.progressbar = QProgressBar()\n self.progressbar.setMinimum(0)\n self.progressbar.setMaximum(100)\n self.progressbar.setValue(0)\n self.progressbar.setMinimumWidth(200)\n\n self.reset_frame_button = QPushButton('Reset frame')\n self.reset_frame_button.clicked.connect(controller.on_reset_mask)\n self.reset_object_button = QPushButton('Reset object')\n self.reset_object_button.clicked.connect(controller.on_reset_object)\n\n # set up the LCD\n self.lcd = QTextEdit()\n self.lcd.setReadOnly(True)\n self.lcd.setMaximumHeight(28)\n self.lcd.setMaximumWidth(150)\n self.lcd.setText('{: 5d} / {: 5d}'.format(0, controller.T - 1))\n\n # current object id\n self.object_dial = QSpinBox()\n self.object_dial.setReadOnly(False)\n self.object_dial.setMinimumSize(50, 30)\n self.object_dial.setMinimum(1)\n self.object_dial.setMaximum(controller.num_objects)\n self.object_dial.editingFinished.connect(controller.on_object_dial_change)\n\n self.object_color = QLabel()\n self.object_color.setMinimumSize(100, 30)\n self.object_color.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.frame_name = QLabel()\n self.frame_name.setMinimumSize(100, 30)\n self.frame_name.setAlignment(Qt.AlignmentFlag.AlignLeft)\n\n # timeline slider\n self.tl_slider = QSlider(Qt.Orientation.Horizontal)\n self.tl_slider.valueChanged.connect(controller.on_slider_update)\n self.tl_slider.setMinimum(0)\n self.tl_slider.setMaximum(controller.T - 1)\n self.tl_slider.setValue(0)\n self.tl_slider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.tl_slider.setTickInterval(1)\n\n # combobox\n self.combo = QComboBox(self)\n self.combo.addItem(\"mask\")\n self.combo.addItem(\"davis\")\n self.combo.addItem(\"fade\")\n self.combo.addItem(\"light\")\n self.combo.addItem(\"popup\")\n self.combo.addItem(\"layer\")\n self.combo.setCurrentText('davis')\n self.combo.currentTextChanged.connect(controller.set_vis_mode)\n\n self.save_visualization_checkbox = QCheckBox(self)\n self.save_visualization_checkbox.toggled.connect(controller.on_save_visualization_toggle)\n self.save_visualization_checkbox.setChecked(False)\n\n self.save_soft_mask_checkbox = QCheckBox(self)\n self.save_soft_mask_checkbox.toggled.connect(controller.on_save_soft_mask_toggle)\n self.save_soft_mask_checkbox.setChecked(False)\n\n # controls for output FPS and bitrate\n self.fps_dial = QSpinBox()\n self.fps_dial.setReadOnly(False)\n self.fps_dial.setMinimumSize(40, 30)\n self.fps_dial.setMinimum(1)\n self.fps_dial.setMaximum(60)\n self.fps_dial.setValue(cfg['output_fps'])\n self.fps_dial.editingFinished.connect(controller.on_fps_dial_change)\n\n self.bitrate_dial = QSpinBox()\n 
self.bitrate_dial.setReadOnly(False)\n self.bitrate_dial.setMinimumSize(40, 30)\n self.bitrate_dial.setMinimum(1)\n self.bitrate_dial.setMaximum(100)\n self.bitrate_dial.setValue(cfg['output_bitrate'])\n self.bitrate_dial.editingFinished.connect(controller.on_bitrate_dial_change)\n\n # Main canvas -> QLabel\n self.main_canvas = QLabel()\n self.main_canvas.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)\n self.main_canvas.setAlignment(Qt.AlignmentFlag.AlignCenter)\n self.main_canvas.setMinimumSize(100, 100)\n\n self.main_canvas.mousePressEvent = self.on_mouse_press\n self.main_canvas.mouseMoveEvent = self.on_mouse_motion\n self.main_canvas.setMouseTracking(True) # Required for all-time tracking\n self.main_canvas.mouseReleaseEvent = self.on_mouse_release\n\n # clearing memory\n self.clear_all_mem_button = QPushButton('Reset all memory')\n self.clear_all_mem_button.clicked.connect(controller.on_clear_memory)\n self.clear_non_perm_mem_button = QPushButton('Reset non-permanent memory')\n self.clear_non_perm_mem_button.clicked.connect(controller.on_clear_non_permanent_memory)\n\n # displaying memory usage\n self.perm_mem_gauge, self.perm_mem_gauge_layout = create_gauge('Permanent memory size')\n self.work_mem_gauge, self.work_mem_gauge_layout = create_gauge('Working memory size')\n self.long_mem_gauge, self.long_mem_gauge_layout = create_gauge('Long-term memory size')\n self.gpu_mem_gauge, self.gpu_mem_gauge_layout = create_gauge(\n 'GPU mem. (all proc, w/ caching)')\n self.torch_mem_gauge, self.torch_mem_gauge_layout = create_gauge(\n 'GPU mem. (torch, w/o caching)')\n\n # Parameters setting\n self.work_mem_min, self.work_mem_min_layout = create_parameter_box(\n 1, 100, 'Min. working memory frames', callback=controller.on_work_min_change)\n self.work_mem_max, self.work_mem_max_layout = create_parameter_box(\n 2, 100, 'Max. working memory frames', callback=controller.on_work_max_change)\n self.long_mem_max, self.long_mem_max_layout = create_parameter_box(\n 1000,\n 100000,\n 'Max. 
long-term memory size',\n step=1000,\n callback=controller.update_config)\n self.mem_every_box, self.mem_every_box_layout = create_parameter_box(\n 1, 100, 'Memory frame every (r)', callback=controller.update_config)\n\n # import mask/layer\n self.import_mask_button = QPushButton('Import mask')\n self.import_mask_button.clicked.connect(controller.on_import_mask)\n self.import_layer_button = QPushButton('Import layer')\n self.import_layer_button.clicked.connect(controller.on_import_layer)\n\n # Console on the GUI\n self.console = QPlainTextEdit()\n self.console.setReadOnly(True)\n self.console.setMinimumHeight(100)\n self.console.setMaximumHeight(100)\n\n # Tips for the users\n self.tips = QTextEdit()\n self.tips.setReadOnly(True)\n self.tips.setTextInteractionFlags(Qt.NoTextInteraction)\n self.tips.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)\n with open('./gui/TIPS.md') as f:\n self.tips.setMarkdown(f.read())\n\n # navigator\n navi = QHBoxLayout()\n\n interact_subbox = QVBoxLayout()\n interact_topbox = QHBoxLayout()\n interact_botbox = QHBoxLayout()\n interact_topbox.setAlignment(Qt.AlignmentFlag.AlignCenter)\n interact_topbox.addWidget(self.lcd)\n interact_topbox.addWidget(self.play_button)\n interact_topbox.addWidget(self.reset_frame_button)\n interact_topbox.addWidget(self.reset_object_button)\n interact_botbox.addWidget(QLabel('Current object ID:'))\n interact_botbox.addWidget(self.object_dial)\n interact_botbox.addWidget(self.object_color)\n interact_botbox.addWidget(self.frame_name)\n interact_subbox.addLayout(interact_topbox)\n interact_subbox.addLayout(interact_botbox)\n interact_botbox.setAlignment(Qt.AlignmentFlag.AlignLeft)\n navi.addLayout(interact_subbox)\n\n apply_fixed_size_policy = lambda x: x.setSizePolicy(QSizePolicy.Policy.Fixed, QSizePolicy.\n Policy.Fixed)\n apply_to_all_children_widget(interact_topbox, apply_fixed_size_policy)\n apply_to_all_children_widget(interact_botbox, apply_fixed_size_policy)\n\n navi.addStretch(1)\n navi.addStretch(1)\n overlay_subbox = QVBoxLayout()\n overlay_topbox = QHBoxLayout()\n overlay_botbox = QHBoxLayout()\n overlay_topbox.setAlignment(Qt.AlignmentFlag.AlignLeft)\n overlay_botbox.setAlignment(Qt.AlignmentFlag.AlignLeft)\n overlay_topbox.addWidget(QLabel('Overlay mode'))\n overlay_topbox.addWidget(self.combo)\n overlay_topbox.addWidget(QLabel('Save soft mask during propagation'))\n overlay_topbox.addWidget(self.save_soft_mask_checkbox)\n overlay_topbox.addWidget(self.export_binary_button)\n overlay_botbox.addWidget(QLabel('Save overlay'))\n overlay_botbox.addWidget(self.save_visualization_checkbox)\n overlay_botbox.addWidget(self.export_video_button)\n overlay_botbox.addWidget(QLabel('Output FPS: '))\n overlay_botbox.addWidget(self.fps_dial)\n overlay_botbox.addWidget(QLabel('Output bitrate (Mbps): '))\n overlay_botbox.addWidget(self.bitrate_dial)\n overlay_subbox.addLayout(overlay_topbox)\n overlay_subbox.addLayout(overlay_botbox)\n navi.addLayout(overlay_subbox)\n apply_to_all_children_widget(overlay_topbox, apply_fixed_size_policy)\n apply_to_all_children_widget(overlay_botbox, apply_fixed_size_policy)\n\n navi.addStretch(1)\n control_subbox = QVBoxLayout()\n control_topbox = QHBoxLayout()\n control_botbox = QHBoxLayout()\n control_topbox.addWidget(self.commit_button)\n control_topbox.addWidget(self.forward_run_button)\n control_topbox.addWidget(self.backward_run_button)\n control_botbox.addWidget(self.progressbar)\n control_subbox.addLayout(control_topbox)\n control_subbox.addLayout(control_botbox)\n 
navi.addLayout(control_subbox)\n\n # Drawing area main canvas\n draw_area = QHBoxLayout()\n draw_area.addWidget(self.main_canvas, 4)\n\n # right area\n right_area = QVBoxLayout()\n right_area.setAlignment(Qt.AlignmentFlag.AlignBottom)\n right_area.addWidget(self.tips)\n # right_area.addStretch(1)\n\n # Parameters\n right_area.addLayout(self.perm_mem_gauge_layout)\n right_area.addLayout(self.work_mem_gauge_layout)\n right_area.addLayout(self.long_mem_gauge_layout)\n right_area.addLayout(self.gpu_mem_gauge_layout)\n right_area.addLayout(self.torch_mem_gauge_layout)\n right_area.addWidget(self.clear_all_mem_button)\n right_area.addWidget(self.clear_non_perm_mem_button)\n right_area.addLayout(self.work_mem_min_layout)\n right_area.addLayout(self.work_mem_max_layout)\n right_area.addLayout(self.long_mem_max_layout)\n right_area.addLayout(self.mem_every_box_layout)\n\n # import mask/layer\n import_area = QHBoxLayout()\n import_area.setAlignment(Qt.AlignmentFlag.AlignBottom)\n import_area.addWidget(self.import_mask_button)\n import_area.addWidget(self.import_layer_button)\n right_area.addLayout(import_area)\n\n # console\n right_area.addWidget(self.console)\n\n draw_area.addLayout(right_area, 1)\n\n layout = QVBoxLayout()\n layout.addLayout(draw_area)\n layout.addWidget(self.tl_slider)\n layout.addLayout(navi)\n self.setLayout(layout)\n\n # timer to play video\n self.timer = QTimer()\n self.timer.setSingleShot(False)\n self.timer.timeout.connect(controller.on_play_video_timer)\n\n # timer to update GPU usage\n self.gpu_timer = QTimer()\n self.gpu_timer.setSingleShot(False)\n self.gpu_timer.timeout.connect(controller.on_gpu_timer)\n self.gpu_timer.setInterval(2000)\n self.gpu_timer.start()\n\n # Objects shortcuts\n for i in range(1, controller.num_objects + 1):\n QShortcut(QKeySequence(str(i)),\n self).activated.connect(functools.partial(controller.hit_number_key, i))\n QShortcut(QKeySequence(f\"Ctrl+{i}\"),\n self).activated.connect(functools.partial(controller.hit_number_key, i))\n\n # <- and -> shortcuts\n QShortcut(QKeySequence(Qt.Key.Key_Left), self).activated.connect(controller.on_prev_frame)\n QShortcut(QKeySequence(Qt.Key.Key_Right), self).activated.connect(controller.on_next_frame)\n\n def resizeEvent(self, event):\n self.controller.show_current_frame()\n\n def text(self, text):\n self.console.moveCursor(QTextCursor.MoveOperation.End)\n self.console.insertPlainText(text + '\\n')\n\n def set_canvas(self, image):\n height, width, channel = image.shape\n bytesPerLine = 3 * width\n\n qImg = QImage(image.data, width, height, bytesPerLine, QImage.Format.Format_RGB888)\n self.main_canvas.setPixmap(\n QPixmap(\n qImg.scaled(self.main_canvas.size(), Qt.AspectRatioMode.KeepAspectRatio,\n Qt.TransformationMode.FastTransformation)))\n\n self.main_canvas_size = self.main_canvas.size()\n self.image_size = qImg.size()\n\n def update_slider(self, value):\n self.lcd.setText('{: 3d} / {: 3d}'.format(value, self.controller.T - 1))\n self.tl_slider.setValue(value)\n\n def pixel_pos_to_image_pos(self, x, y):\n # Un-scale and un-pad the label coordinates into image coordinates\n oh, ow = self.image_size.height(), self.image_size.width()\n nh, nw = self.main_canvas_size.height(), self.main_canvas_size.width()\n\n h_ratio = nh / oh\n w_ratio = nw / ow\n dominate_ratio = min(h_ratio, w_ratio)\n\n # Solve scale\n x /= dominate_ratio\n y /= dominate_ratio\n\n # Solve padding\n fh, fw = nh / dominate_ratio, nw / dominate_ratio\n x -= (fw - ow) / 2\n y -= (fh - oh) / 2\n\n return x, y\n\n def 
is_pos_out_of_bound(self, x, y):\n x, y = self.pixel_pos_to_image_pos(x, y)\n\n out_of_bound = ((x < 0) or (y < 0) or (x > self.w - 1) or (y > self.h - 1))\n\n return out_of_bound\n\n def get_scaled_pos(self, x, y):\n x, y = self.pixel_pos_to_image_pos(x, y)\n\n x = max(0, min(self.w - 1, x))\n y = max(0, min(self.h - 1, y))\n\n return x, y\n\n def forward_propagation_start(self):\n self.backward_run_button.setEnabled(False)\n self.forward_run_button.setText('Pause propagation')\n\n def backward_propagation_start(self):\n self.forward_run_button.setEnabled(False)\n self.backward_run_button.setText('Pause propagation')\n\n def pause_propagation(self):\n self.forward_run_button.setEnabled(True)\n self.backward_run_button.setEnabled(True)\n self.clear_all_mem_button.setEnabled(True)\n self.clear_non_perm_mem_button.setEnabled(True)\n self.forward_run_button.setText('Propagate forward')\n self.backward_run_button.setText('propagate backward')\n self.tl_slider.setEnabled(True)\n\n def process_events(self):\n QApplication.processEvents()\n\n def on_mouse_press(self, event):\n if self.is_pos_out_of_bound(event.position().x(), event.position().y()):\n return\n\n ex, ey = self.get_scaled_pos(event.position().x(), event.position().y())\n if event.button() == Qt.MouseButton.LeftButton:\n action = 'left'\n elif event.button() == Qt.MouseButton.RightButton:\n action = 'right'\n elif event.button() == Qt.MouseButton.MiddleButton:\n action = 'middle'\n\n self.click_fn(action, ex, ey)\n\n def on_mouse_motion(self, event):\n ex, ey = self.get_scaled_pos(event.position().x(), event.position().y())\n self.on_mouse_motion_xy(ex, ey)\n\n def on_mouse_release(self, event):\n pass\n\n def on_play_video(self):\n if self.timer.isActive():\n self.timer.stop()\n self.play_button.setText('Play video')\n else:\n self.timer.start(1000 // 30)\n self.play_button.setText('Stop video')\n\n def open_file(self, prompt):\n options = QFileDialog.Options()\n file_name, _ = QFileDialog.getOpenFileName(self,\n prompt,\n \"\",\n \"Image files (*)\",\n options=options)\n return file_name\n\n def set_object_color(self, object_id: int):\n r, g, b = davis_palette_np[object_id]\n rgb = f'rgb({r},{g},{b})'\n self.object_color.setStyleSheet('QLabel {background: ' + rgb + ';}')\n self.object_color.setText(f'{object_id}')\n\n def progressbar_update(self, progress: float):\n self.progressbar.setValue(int(progress * 100))\n self.process_events()" }, { "identifier": "ClickController", "path": "gui/click_controller.py", "snippet": "class ClickController:\n def __init__(self, checkpoint_path: str, device: str = 'cuda', max_size: int = 800):\n model = utils.load_is_model(checkpoint_path, device, cpu_dist_maps=True)\n\n # Predictor params\n zoomin_params = {\n 'skip_clicks': 1,\n 'target_size': 480,\n 'expansion_ratio': 1.4,\n }\n\n predictor_params = {\n 'brs_mode': 'f-BRS-B',\n # 'brs_mode': 'NoBRS',\n 'prob_thresh': 0.5,\n 'zoom_in_params': zoomin_params,\n 'predictor_params': {\n 'net_clicks_limit': 8,\n 'max_size': max_size,\n },\n 'brs_opt_func_params': {\n 'min_iou_diff': 1e-3\n },\n 'lbfgs_params': {\n 'maxfun': 20\n },\n 'with_flip': True,\n }\n\n self.controller = InteractiveController(model, device, predictor_params)\n self.anchored = False\n self.device = device\n\n def unanchor(self):\n self.anchored = False\n\n def interact(self, image: torch.Tensor, x: int, y: int, is_positive: bool,\n prev_mask: torch.Tensor):\n if not self.anchored:\n image = image.to(self.device, non_blocking=True)\n self.controller.set_image(image)\n 
self.controller.reset_predictor()\n self.anchored = True\n\n self.controller.add_click(x, y, is_positive, prev_mask=prev_mask)\n # return self.controller.result_mask\n return self.controller.probs_history[-1][1]\n # return (self.controller.probs_history[-1][1] > 0.5).float()\n\n def undo(self):\n self.controller.undo_click()\n if len(self.controller.probs_history) == 0:\n return None\n else:\n return (self.controller.probs_history[-1][1] > 0.5).float()" }, { "identifier": "PropagationReader", "path": "gui/reader.py", "snippet": "class PropagationReader(Dataset):\n def __init__(self, res_man: ResourceManager, start_ti: int, direction: Literal['forward',\n 'backward']):\n self.res_man = res_man\n self.start_ti = start_ti\n self.direction = direction\n\n # skip the first frame\n if self.direction == 'forward':\n self.start_ti += 1\n self.length = self.res_man.T - self.start_ti\n elif self.direction == 'backward':\n self.start_ti -= 1\n self.length = self.start_ti + 1\n else:\n raise NotImplementedError\n\n self.to_tensor = ToTensor()\n\n def __getitem__(self, index: int):\n if self.direction == 'forward':\n ti = self.start_ti + index\n elif self.direction == 'backward':\n ti = self.start_ti - index\n else:\n raise NotImplementedError\n\n assert 0 <= ti < self.res_man.T\n\n image = self.res_man.get_image(ti)\n image_torch = self.to_tensor(image)\n\n return image, image_torch\n\n def __len__(self):\n return self.length" }, { "identifier": "get_data_loader", "path": "gui/reader.py", "snippet": "def get_data_loader(dataset: Dataset, num_workers: int):\n if 'linux' in sys.platform:\n loader = DataLoader(dataset,\n batch_size=None,\n shuffle=False,\n num_workers=num_workers,\n collate_fn=lambda x: x)\n else:\n print(f'Non-linux platform {sys.platform} detected, using single-threaded dataloader')\n loader = DataLoader(dataset,\n batch_size=None,\n shuffle=False,\n num_workers=0,\n collate_fn=lambda x: x)\n return loader" }, { "identifier": "convert_frames_to_video", "path": "gui/exporter.py", "snippet": "def convert_frames_to_video(\n image_folder: str,\n output_path: str,\n fps: int = 24,\n bitrate: int = 1, # in Mbps\n progress_callback=None) -> None:\n images = [img for img in sorted(os.listdir(image_folder)) if img.endswith(\".jpg\")]\n frame = cv2.imread(os.path.join(image_folder, images[0]))\n height, width, layers = frame.shape\n\n output = av.open(output_path, mode=\"w\")\n\n stream = output.add_stream(\"h264\", rate=fps)\n stream.width = width\n stream.height = height\n stream.pix_fmt = \"yuv420p\"\n stream.bit_rate = bitrate * (10**7)\n\n for i, img_path in enumerate(images):\n img = cv2.imread(os.path.join(image_folder, img_path))\n frame = av.VideoFrame.from_ndarray(img, format='bgr24')\n packet = stream.encode(frame)\n output.mux(packet)\n\n if progress_callback is not None and i % 10 == 0:\n progress_callback(i / len(images))\n\n # flush\n packet = stream.encode(None)\n output.mux(packet)\n\n output.close()" }, { "identifier": "convert_mask_to_binary", "path": "gui/exporter.py", "snippet": "def convert_mask_to_binary(mask_folder: str,\n output_path: str,\n target_objects: List[int],\n progress_callback=None) -> None:\n masks = [img for img in sorted(os.listdir(mask_folder)) if img.endswith(\".png\")]\n\n for i, mask_path in enumerate(masks):\n mask = Image.open(os.path.join(mask_folder, mask_path))\n mask = np.array(mask)\n mask = np.where(np.isin(mask, target_objects), 255, 0)\n cv2.imwrite(os.path.join(output_path, mask_path), mask)\n\n if progress_callback is not None and i % 10 == 
0:\n progress_callback(i / len(masks))" }, { "identifier": "download_models_if_needed", "path": "scripts/download_models.py", "snippet": "def download_models_if_needed():\n os.makedirs('weights', exist_ok=True)\n for link, md5 in _links:\n # download file if not exists with a progressbar\n filename = link.split('/')[-1]\n if not os.path.exists(os.path.join('weights', filename)) or hashlib.md5(open(os.path.join('weights', filename), 'rb').read()).hexdigest() != md5:\n print(f'Downloading {filename}...')\n r = requests.get(link, stream=True)\n total_size = int(r.headers.get('content-length', 0))\n block_size = 1024\n t = tqdm(total=total_size, unit='iB', unit_scale=True)\n with open(os.path.join('weights', filename), 'wb') as f:\n for data in r.iter_content(block_size):\n t.update(len(data))\n f.write(data)\n t.close()\n if total_size != 0 and t.n != total_size:\n raise RuntimeError('Error while downloading %s' % filename)" } ]
import os
import logging
import cv2
import torch
import numpy as np
from os import path
from typing import Literal
from torch import mps
from torch import autocast
from torchvision.transforms.functional import to_tensor
from omegaconf import DictConfig, open_dict
from cutie.model.cutie import CUTIE
from cutie.inference.inference_core import InferenceCore
from gui.interaction import *
from gui.interactive_utils import *
from gui.resource_manager import ResourceManager
from gui.gui import GUI
from gui.click_controller import ClickController
from gui.reader import PropagationReader, get_data_loader
from gui.exporter import convert_frames_to_video, convert_mask_to_binary
from scripts.download_models import download_models_if_needed
17344
self.curr_mask.fill(0) else: self.curr_mask = loaded_mask.copy() self.curr_prob = None def convert_current_image_mask_torch(self, no_mask: bool = False): if self.curr_image_torch is None: self.curr_image_torch = to_tensor(self.curr_image_np).to(self.device, non_blocking=True) if self.curr_prob is None and not no_mask: self.curr_prob = index_numpy_to_one_hot_torch(self.curr_mask, self.num_objects + 1).to( self.device, non_blocking=True) def compose_current_im(self): self.vis_image = get_visualization(self.vis_mode, self.curr_image_np, self.curr_mask, self.overlay_layer, self.vis_target_objects) def update_canvas(self): self.gui.set_canvas(self.vis_image) def update_current_image_fast(self): # fast path, uses gpu. Changes the image in-place to avoid copying # thus current_image_torch must be voided afterwards self.vis_image = get_visualization_torch(self.vis_mode, self.curr_image_torch, self.curr_prob, self.overlay_layer_torch, self.vis_target_objects) self.curr_image_torch = None self.vis_image = np.ascontiguousarray(self.vis_image) if self.save_visualization: self.res_man.save_visualization(self.curr_ti, self.vis_mode, self.vis_image) if self.save_soft_mask: self.res_man.save_soft_mask(self.curr_ti, self.curr_prob.cpu().numpy()) self.gui.set_canvas(self.vis_image) def show_current_frame(self, fast: bool = False): # Re-compute overlay and show the image if fast: self.update_current_image_fast() else: self.compose_current_im() if self.save_visualization: self.res_man.save_visualization(self.curr_ti, self.vis_mode, self.vis_image) self.update_canvas() self.gui.update_slider(self.curr_ti) self.gui.frame_name.setText(self.res_man.names[self.curr_ti] + '.jpg') def set_vis_mode(self): self.vis_mode = self.gui.combo.currentText() self.show_current_frame() def save_current_mask(self): # save mask to hard disk self.res_man.save_mask(self.curr_ti, self.curr_mask) def on_slider_update(self): # if we are propagating, the on_run function will take care of everything # don't do duplicate work here self.curr_ti = self.gui.tl_slider.value() if not self.propagating: # with self.vis_cond: # self.vis_cond.notify() if self.curr_frame_dirty: self.save_current_mask() self.curr_frame_dirty = False self.reset_this_interaction() self.curr_ti = self.gui.tl_slider.value() self.load_current_image_mask() self.show_current_frame() def on_forward_propagation(self): if self.propagating: # acts as a pause button self.propagating = False self.propagate_direction = 'none' else: self.propagate_fn = self.on_next_frame self.gui.forward_propagation_start() self.propagate_direction = 'forward' self.on_propagate() def on_backward_propagation(self): if self.propagating: # acts as a pause button self.propagating = False self.propagate_direction = 'none' else: self.propagate_fn = self.on_prev_frame self.gui.backward_propagation_start() self.propagate_direction = 'backward' self.on_propagate() def on_pause(self): self.propagating = False self.gui.text(f'Propagation stopped at t={self.curr_ti}.') self.gui.pause_propagation() def on_propagate(self): # start to propagate with autocast(self.device, enabled=(self.amp and self.device == 'cuda')): self.convert_current_image_mask_torch() self.gui.text(f'Propagation started at t={self.curr_ti}.') self.processor.clear_sensory_memory() self.curr_prob = self.processor.step(self.curr_image_torch, self.curr_prob[1:], idx_mask=False) self.curr_mask = torch_prob_to_numpy_mask(self.curr_prob) # clear self.interacted_prob = None self.reset_this_interaction() self.show_current_frame(fast=True) 
self.propagating = True self.gui.clear_all_mem_button.setEnabled(False) self.gui.clear_non_perm_mem_button.setEnabled(False) self.gui.tl_slider.setEnabled(False) dataset = PropagationReader(self.res_man, self.curr_ti, self.propagate_direction)
# fix conflicts between qt5 and cv2 os.environ.pop("QT_QPA_PLATFORM_PLUGIN_PATH") try: except: print('torch.MPS not available.') log = logging.getLogger() class MainController(): def __init__(self, cfg: DictConfig) -> None: super().__init__() self.initialized = False # setting up the workspace if cfg["workspace"] is None: if cfg["images"] is not None: basename = path.basename(cfg["images"]) elif cfg["video"] is not None: basename = path.basename(cfg["video"])[:-4] else: raise NotImplementedError('Either images, video, or workspace has to be specified') cfg["workspace"] = path.join(cfg['workspace_root'], basename) # reading arguments self.cfg = cfg self.num_objects = cfg['num_objects'] self.device = cfg['device'] self.amp = cfg['amp'] # initializing the network(s) self.initialize_networks() # main components self.res_man = ResourceManager(cfg) self.processor = InferenceCore(self.cutie, self.cfg) self.gui = GUI(self, self.cfg) # initialize control info self.length: int = self.res_man.length self.interaction: Interaction = None self.interaction_type: str = 'Click' self.curr_ti: int = 0 self.curr_object: int = 1 self.propagating: bool = False self.propagate_direction: Literal['forward', 'backward', 'none'] = 'none' self.last_ex = self.last_ey = 0 # current frame info self.curr_frame_dirty: bool = False self.curr_image_np: np.ndarray = np.zeros((self.h, self.w, 3), dtype=np.uint8) self.curr_image_torch: torch.Tensor = None self.curr_mask: np.ndarray = np.zeros((self.h, self.w), dtype=np.uint8) self.curr_prob: torch.Tensor = torch.zeros((self.num_objects + 1, self.h, self.w), dtype=torch.float).to(self.device) self.curr_prob[0] = 1 # visualization info self.vis_mode: str = 'davis' self.vis_image: np.ndarray = None self.save_visualization: bool = False self.save_soft_mask: bool = False self.interacted_prob: torch.Tensor = None self.overlay_layer: np.ndarray = None self.overlay_layer_torch: torch.Tensor = None # the object id used for popup/layer overlay self.vis_target_objects = list(range(1, self.num_objects + 1)) self.load_current_image_mask() self.show_current_frame() # initialize stuff self.update_memory_gauges() self.update_gpu_gauges() self.gui.work_mem_min.setValue(self.processor.memory.min_mem_frames) self.gui.work_mem_max.setValue(self.processor.memory.max_mem_frames) self.gui.long_mem_max.setValue(self.processor.memory.max_long_tokens) self.gui.mem_every_box.setValue(self.processor.mem_every) # for exporting videos self.output_fps = cfg['output_fps'] self.output_bitrate = cfg['output_bitrate'] # set callbacks self.gui.on_mouse_motion_xy = self.on_mouse_motion_xy self.gui.click_fn = self.click_fn self.gui.show() self.gui.text('Initialized.') self.initialized = True # try to load the default overlay self._try_load_layer('./docs/uiuc.png') self.gui.set_object_color(self.curr_object) self.update_config() def initialize_networks(self) -> None: download_models_if_needed() self.cutie = CUTIE(self.cfg).eval().to(self.device) model_weights = torch.load(self.cfg.weights, map_location=self.device) self.cutie.load_weights(model_weights) self.click_ctrl = ClickController(self.cfg.ritm_weights, device=self.device) def hit_number_key(self, number: int): if number == self.curr_object: return self.curr_object = number self.gui.object_dial.setValue(number) if self.click_ctrl is not None: self.click_ctrl.unanchor() self.gui.text(f'Current object changed to {number}.') self.gui.set_object_color(number) self.show_current_frame() def click_fn(self, action: Literal['left', 'right', 'middle'], x: int, y: int): 
if self.propagating: return last_interaction = self.interaction new_interaction = None with autocast(self.device, enabled=(self.amp and self.device == 'cuda')): if action in ['left', 'right']: # left: positive click # right: negative click self.convert_current_image_mask_torch() image = self.curr_image_torch if (last_interaction is None or last_interaction.tar_obj != self.curr_object): # create new interaction is needed self.complete_interaction() self.click_ctrl.unanchor() new_interaction = ClickInteraction(image, self.curr_prob, (self.h, self.w), self.click_ctrl, self.curr_object) if new_interaction is not None: self.interaction = new_interaction self.interaction.push_point(x, y, is_neg=(action == 'right')) self.interacted_prob = self.interaction.predict().to(self.device, non_blocking=True) self.update_interacted_mask() self.update_gpu_gauges() elif action == 'middle': # middle: select a new visualization object target_object = self.curr_mask[int(y), int(x)] if target_object in self.vis_target_objects: self.vis_target_objects.remove(target_object) else: self.vis_target_objects.append(target_object) self.gui.text(f'Overlay target(s) changed to {self.vis_target_objects}') self.show_current_frame() return else: raise NotImplementedError def load_current_image_mask(self, no_mask: bool = False): self.curr_image_np = self.res_man.get_image(self.curr_ti) self.curr_image_torch = None if not no_mask: loaded_mask = self.res_man.get_mask(self.curr_ti) if loaded_mask is None: self.curr_mask.fill(0) else: self.curr_mask = loaded_mask.copy() self.curr_prob = None def convert_current_image_mask_torch(self, no_mask: bool = False): if self.curr_image_torch is None: self.curr_image_torch = to_tensor(self.curr_image_np).to(self.device, non_blocking=True) if self.curr_prob is None and not no_mask: self.curr_prob = index_numpy_to_one_hot_torch(self.curr_mask, self.num_objects + 1).to( self.device, non_blocking=True) def compose_current_im(self): self.vis_image = get_visualization(self.vis_mode, self.curr_image_np, self.curr_mask, self.overlay_layer, self.vis_target_objects) def update_canvas(self): self.gui.set_canvas(self.vis_image) def update_current_image_fast(self): # fast path, uses gpu. 
Changes the image in-place to avoid copying # thus current_image_torch must be voided afterwards self.vis_image = get_visualization_torch(self.vis_mode, self.curr_image_torch, self.curr_prob, self.overlay_layer_torch, self.vis_target_objects) self.curr_image_torch = None self.vis_image = np.ascontiguousarray(self.vis_image) if self.save_visualization: self.res_man.save_visualization(self.curr_ti, self.vis_mode, self.vis_image) if self.save_soft_mask: self.res_man.save_soft_mask(self.curr_ti, self.curr_prob.cpu().numpy()) self.gui.set_canvas(self.vis_image) def show_current_frame(self, fast: bool = False): # Re-compute overlay and show the image if fast: self.update_current_image_fast() else: self.compose_current_im() if self.save_visualization: self.res_man.save_visualization(self.curr_ti, self.vis_mode, self.vis_image) self.update_canvas() self.gui.update_slider(self.curr_ti) self.gui.frame_name.setText(self.res_man.names[self.curr_ti] + '.jpg') def set_vis_mode(self): self.vis_mode = self.gui.combo.currentText() self.show_current_frame() def save_current_mask(self): # save mask to hard disk self.res_man.save_mask(self.curr_ti, self.curr_mask) def on_slider_update(self): # if we are propagating, the on_run function will take care of everything # don't do duplicate work here self.curr_ti = self.gui.tl_slider.value() if not self.propagating: # with self.vis_cond: # self.vis_cond.notify() if self.curr_frame_dirty: self.save_current_mask() self.curr_frame_dirty = False self.reset_this_interaction() self.curr_ti = self.gui.tl_slider.value() self.load_current_image_mask() self.show_current_frame() def on_forward_propagation(self): if self.propagating: # acts as a pause button self.propagating = False self.propagate_direction = 'none' else: self.propagate_fn = self.on_next_frame self.gui.forward_propagation_start() self.propagate_direction = 'forward' self.on_propagate() def on_backward_propagation(self): if self.propagating: # acts as a pause button self.propagating = False self.propagate_direction = 'none' else: self.propagate_fn = self.on_prev_frame self.gui.backward_propagation_start() self.propagate_direction = 'backward' self.on_propagate() def on_pause(self): self.propagating = False self.gui.text(f'Propagation stopped at t={self.curr_ti}.') self.gui.pause_propagation() def on_propagate(self): # start to propagate with autocast(self.device, enabled=(self.amp and self.device == 'cuda')): self.convert_current_image_mask_torch() self.gui.text(f'Propagation started at t={self.curr_ti}.') self.processor.clear_sensory_memory() self.curr_prob = self.processor.step(self.curr_image_torch, self.curr_prob[1:], idx_mask=False) self.curr_mask = torch_prob_to_numpy_mask(self.curr_prob) # clear self.interacted_prob = None self.reset_this_interaction() self.show_current_frame(fast=True) self.propagating = True self.gui.clear_all_mem_button.setEnabled(False) self.gui.clear_non_perm_mem_button.setEnabled(False) self.gui.tl_slider.setEnabled(False) dataset = PropagationReader(self.res_man, self.curr_ti, self.propagate_direction)
loader = get_data_loader(dataset, self.cfg.num_read_workers)
6
2023-10-19 17:49:24+00:00
24k
ZhengyiLuo/PerpetualHumanoidControl
scripts/vis/vis_smpl_o3d_multi.py
[ { "identifier": "SMPL_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPL_Parser(_SMPL):\n\n def __init__(self, create_transl=False, *args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n data_struct: Strct\n A struct object. If given, then the parameters of the model are\n read from the object. Otherwise, the model tries to read the\n parameters from the given `model_path`. (default = None)\n create_global_orient: bool, optional\n Flag for creating a member variable for the global orientation\n of the body. (default = True)\n global_orient: torch.tensor, optional, Bx3\n The default value for the global orientation variable.\n (default = None)\n create_body_pose: bool, optional\n Flag for creating a member variable for the pose of the body.\n (default = True)\n body_pose: torch.tensor, optional, Bx(Body Joints * 3)\n The default value for the body pose variable.\n (default = None)\n create_betas: bool, optional\n Flag for creating a member variable for the shape space\n (default = True).\n betas: torch.tensor, optional, Bx10\n The default value for the shape member variable.\n (default = None)\n create_transl: bool, optional\n Flag for creating a member variable for the translation\n of the body. (default = True)\n transl: torch.tensor, optional, Bx3\n The default value for the transl variable.\n (default = None)\n dtype: torch.dtype, optional\n The data type for the created variables\n batch_size: int, optional\n The batch size used for creating the member variables\n joint_mapper: object, optional\n An object that re-maps the joints. Useful if one wants to\n re-order the SMPL joints to some other convention (e.g. 
MSCOCO)\n (default = None)\n gender: str, optional\n Which gender to load\n vertex_ids: dict, optional\n A dictionary containing the indices of the extra vertices that\n will be selected\n \"\"\"\n super(SMPL_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPL_BONE_ORDER_NAMES\n\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"x\", \"y\", \"z\"] for x in self.joint_names}\n self.joint_range = {x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi]) for x in self.joint_names}\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n self.joint_range[\"L_Shoulder\"] *= 4\n self.joint_range[\"R_Shoulder\"] *= 4\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n\n # self.contype = {\n # 3: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 1: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n # self.conaffinity = {\n # 1: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 3: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n self.zero_pose = torch.zeros(1, 72).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPL_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 72\n \"\"\"\n if pose.shape[1] != 72:\n pose = pose.reshape(-1, 72)\n\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n if th_betas.shape[-1] == 16:\n th_betas = th_betas[:, :10]\n\n batch_size = pose.shape[0]\n\n smpl_output = self.forward(\n betas=th_betas,\n transl=th_trans,\n body_pose=pose[:, 3:],\n global_orient=pose[:, :3],\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints[:, :24]\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, zero_pose=None, betas=torch.zeros(1, 10).float()):\n with torch.no_grad():\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = Jtr.detach().cpu().numpy()\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n joint_names = self.joint_names\n joint_pos = Jtr[0].numpy()\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents)}\n parents_dict = {joint_names[i]: joint_names[parents[i]] for i in range(len(joint_names))}\n channels = [\"z\", \"y\", \"x\"]\n skin_weights = self.lbs_weights.numpy()\n return (verts[0], jts_np[0], skin_weights, self.joint_names, joint_offsets, parents_dict, channels, self.joint_range)\n\n def get_mesh_offsets(self, zero_pose=None, betas=torch.zeros(1, 10), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n if 
zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr[0].numpy()\n joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents)}\n joint_parents = {x: joint_names[i] if i >= 0 else None for x, i in zip(joint_names, smpl_joint_parents)}\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )\n\n def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose.repeat(betas.shape[0], 1), th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr\n joint_offsets = {joint_names[c]: (joint_pos[:, c] - joint_pos[:, p]) if c > 0 else joint_pos[:, c] for c, p in enumerate(smpl_joint_parents)}\n joint_parents = {x: joint_names[i] if i >= 0 else None for x, i in zip(joint_names, smpl_joint_parents)}\n\n skin_weights = self.lbs_weights\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLH_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLH_Parser(_SMPLH):\n\n def __init__(self, *args, **kwargs):\n super(SMPLH_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi]) for x in self.joint_names}\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLH_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n L_hand_pose=pose[:, 66:111],\n R_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 
16).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {names_smpl[i]: offsets_smpl[i] for i in range(len(names_smpl))}\n parents_dict = {names_smpl[i]: names_smpl[parents[i]] for i in range(len(names_smpl))}\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents)}\n joint_parents = {x: joint_names[i] if i >= 0 else None for x, i in zip(joint_names, smpl_joint_parents)}\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLX_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLX_Parser(_SMPLX):\n\n def __init__(self, *args, **kwargs):\n super(SMPLX_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi]) for x in self.joint_names}\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n self.joint_to_use = [SMPLX_BONE_ORDER_NAMES.index(i) for i in SMPLH_BONE_ORDER_NAMES]\n self.parents_to_use = np.concatenate([np.arange(0, 22), np.arange(25, 55)])\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLX_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n left_hand_pose=pose[:, 66:111],\n right_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # return vertices, joints\n return vertices, joints\n \n \n\n def get_offsets(self, 
v_template=None, zero_pose=None, betas=torch.zeros(1, 26).float()):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = Jtr.detach().cpu().numpy()\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n joint_names = self.joint_names\n joint_pos = Jtr[0].numpy()\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents)}\n parents_dict = {joint_names[i]: joint_names[parents[i]] for i in range(len(joint_names))}\n channels = [\"z\", \"y\", \"x\"]\n skin_weights = self.lbs_weights.numpy()\n return (verts[0], jts_np[0], skin_weights, self.joint_names, joint_offsets, parents_dict, channels, self.joint_range)\n\n def get_mesh_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n # joint_names = self.joint_names\n joint_names = SMPLX_BONE_ORDER_NAMES\n verts, Jtr = self.get_joints_verts(self.zero_pose)\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n # print(\n # joint_pos.shape,\n # smpl_joint_parents.shape,\n # len(self.parents_to_use),\n # self.parents.cpu().numpy().shape,\n # )\n joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents) if joint_names[c] in self.joint_names}\n joint_parents = {x: joint_names[i] if i >= 0 else None for x, i in zip(joint_names, smpl_joint_parents) if joint_names[i] in self.joint_names}\n\n verts = verts[0].numpy()\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()[:, self.parents_to_use]\n return (\n verts,\n joint_pos,\n skin_weights,\n self.joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPL_BONE_ORDER_NAMES", "path": "uhc/smpllib/smpl_mujoco.py", "snippet": "class SMPLConverter:\n def __init__(self, model, new_model, smpl_model=\"smpl\"):\n def qpos_smpl_2_new(self, qpos):\n def qvel_smpl_2_new(self, qpvel):\n def qpos_new_2_smpl(self, qpos):\n def qvel_new_2_smpl(self, qvel):\n def jpos_new_2_smpl(self, jpos):\n def get_new_qpos_lim(self):\n def get_new_qvel_lim(self):\n def get_new_body_lim(self):\n def get_new_diff_weight(self):\n def get_new_jkp(self):\n def get_new_jkd(self):\n def get_new_a_scale(self):\n def get_new_torque_limit(self):\ndef smplh_to_smpl(pose):\ndef smpl_to_smplh(pose):\ndef smpl_to_qpose(\n pose,\n mj_model,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef smpl_to_qpose_multi(\n pose,\n offset,\n mujoco_body_order,\n num_people=1,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef smpl_to_qpose_torch(\n pose,\n mj_model,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n 
euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef qpos_to_smpl(qpos, mj_model, smpl_model=\"smpl\"):\ndef qpos_to_smpl_torch(qpos, mj_model, smpl_model=\"smpl\"):\ndef smpl_6d_to_qpose(full_pose, model, normalize=False):\ndef normalize_smpl_pose(pose_aa, trans=None, random_root=False):" }, { "identifier": "SkeletonTree", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonTree(Serializable):\n \"\"\"\n A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure\n over a list of nodes with their names indicated by strings. Each edge in the tree has a local\n translation associated with it which describes the distance between the two nodes that it\n connects. \n\n Basic Usage:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> t\n SkeletonTree(\n node_names=['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot'],\n parent_indices=tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11]),\n local_translation=tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n )\n >>> t.node_names\n ['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot']\n >>> t.parent_indices\n tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11])\n >>> t.local_translation\n tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n >>> t.parent_of('front_left_leg')\n 'torso'\n >>> t.index('front_right_foot')\n 6\n >>> t[2]\n 'aux_1'\n \"\"\"\n\n __example_mjcf_path__ = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"tests/ant.xml\")\n\n def __init__(self, node_names, parent_indices, local_translation):\n \"\"\"\n :param node_names: a list of names for each tree node\n :type node_names: List[str]\n :param parent_indices: an int32-typed tensor that represents the edge to its parent.\\\n -1 represents the root node\n :type parent_indices: Tensor\n :param local_translation: a 3d vector that gives local translation information\n :type local_translation: Tensor\n \"\"\"\n ln, lp, ll = len(node_names), len(parent_indices), len(local_translation)\n assert len(set((ln, lp, ll))) == 1\n self._node_names = node_names\n self._parent_indices = parent_indices.long()\n self._local_translation = local_translation\n self._node_indices = {self.node_names[i]: i for i in range(len(self))}\n\n def __len__(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self.node_names)\n\n def __iter__(self):\n \"\"\" iterator that iterate through the name of each node \"\"\"\n yield from self.node_names\n\n def __getitem__(self, item):\n \"\"\" get the name of the node given the index \"\"\"\n return self.node_names[item]\n\n 
def __repr__(self):\n return (\"SkeletonTree(\\n node_names={},\\n parent_indices={},\"\n \"\\n local_translation={}\\n)\".format(\n self._indent(repr(self.node_names)),\n self._indent(repr(self.parent_indices)),\n self._indent(repr(self.local_translation)),\n ))\n\n def _indent(self, s):\n return \"\\n \".join(s.split(\"\\n\"))\n\n @property\n def node_names(self):\n return self._node_names\n\n @property\n def parent_indices(self):\n return self._parent_indices\n\n @property\n def local_translation(self):\n return self._local_translation\n\n @property\n def num_joints(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self)\n\n @classmethod\n def from_dict(cls, dict_repr, *args, **kwargs):\n return cls(\n list(map(str, dict_repr[\"node_names\"])),\n TensorUtils.from_dict(dict_repr[\"parent_indices\"], *args, **kwargs),\n TensorUtils.from_dict(dict_repr[\"local_translation\"], *args, **kwargs),\n )\n\n def to_dict(self):\n return OrderedDict([\n (\"node_names\", self.node_names),\n (\"parent_indices\", tensor_to_dict(self.parent_indices)),\n (\"local_translation\", tensor_to_dict(self.local_translation)),\n ])\n\n @classmethod\n def from_mjcf(cls, path: str) -> \"SkeletonTree\":\n \"\"\"\n Parses a mujoco xml scene description file and returns a Skeleton Tree.\n We use the model attribute at the root as the name of the tree.\n \n :param path:\n :type path: string\n :return: The skeleton tree constructed from the mjcf file\n :rtype: SkeletonTree\n \"\"\"\n tree = ET.parse(path)\n xml_doc_root = tree.getroot()\n xml_world_body = xml_doc_root.find(\"worldbody\")\n if xml_world_body is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n # assume this is the root\n xml_body_root = xml_world_body.find(\"body\")\n if xml_body_root is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n\n node_names = []\n parent_indices = []\n local_translation = []\n\n # recursively adding all nodes into the skel_tree\n def _add_xml_node(xml_node, parent_index, node_index):\n node_name = xml_node.attrib.get(\"name\")\n # parse the local translation into float list\n pos = np.fromstring(xml_node.attrib.get(\"pos\"), dtype=float, sep=\" \")\n node_names.append(node_name)\n parent_indices.append(parent_index)\n local_translation.append(pos)\n curr_index = node_index\n node_index += 1\n for next_node in xml_node.findall(\"body\"):\n node_index = _add_xml_node(next_node, curr_index, node_index)\n return node_index\n\n _add_xml_node(xml_body_root, -1, 0)\n\n return cls(\n node_names,\n torch.from_numpy(np.array(parent_indices, dtype=np.int32)),\n torch.from_numpy(np.array(local_translation, dtype=np.float32)),\n )\n\n def parent_of(self, node_name):\n \"\"\" get the name of the parent of the given node\n\n :param node_name: the name of the node\n :type node_name: string\n :rtype: string\n \"\"\"\n return self[int(self.parent_indices[self.index(node_name)].item())]\n\n def index(self, node_name):\n \"\"\" get the index of the node\n \n :param node_name: the name of the node\n :type node_name: string\n :rtype: int\n \"\"\"\n return self._node_indices[node_name]\n\n def drop_nodes_by_names(self, node_names: List[str], pairwise_translation=None) -> \"SkeletonTree\":\n new_length = len(self) - len(node_names)\n new_node_names = []\n new_local_translation = torch.zeros(new_length, 3, dtype=self.local_translation.dtype)\n new_parent_indices = torch.zeros(new_length, dtype=self.parent_indices.dtype)\n parent_indices = self.parent_indices.numpy()\n 
new_node_indices: dict = {}\n new_node_index = 0\n for node_index in range(len(self)):\n if self[node_index] in node_names:\n continue\n tb_node_index = parent_indices[node_index]\n if tb_node_index != -1:\n local_translation = self.local_translation[node_index, :]\n while tb_node_index != -1 and self[tb_node_index] in node_names:\n local_translation += self.local_translation[tb_node_index, :]\n tb_node_index = parent_indices[tb_node_index]\n assert tb_node_index != -1, \"the root node cannot be dropped\"\n\n if pairwise_translation is not None:\n local_translation = pairwise_translation[tb_node_index, node_index, :]\n else:\n local_translation = self.local_translation[node_index, :]\n\n new_node_names.append(self[node_index])\n new_local_translation[new_node_index, :] = local_translation\n if tb_node_index == -1:\n new_parent_indices[new_node_index] = -1\n else:\n new_parent_indices[new_node_index] = new_node_indices[self[tb_node_index]]\n new_node_indices[self[node_index]] = new_node_index\n new_node_index += 1\n\n return SkeletonTree(new_node_names, new_parent_indices, new_local_translation)\n\n def keep_nodes_by_names(self, node_names: List[str], pairwise_translation=None) -> \"SkeletonTree\":\n nodes_to_drop = list(filter(lambda x: x not in node_names, self))\n return self.drop_nodes_by_names(nodes_to_drop, pairwise_translation)" }, { "identifier": "SkeletonMotion", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonMotion(SkeletonState):\n\n def __init__(self, tensor_backend, skeleton_tree, is_local, fps, *args, **kwargs):\n self._fps = fps\n super().__init__(tensor_backend, skeleton_tree, is_local, *args, **kwargs)\n\n def clone(self):\n return SkeletonMotion(self.tensor.clone(), self.skeleton_tree, self._is_local, self._fps)\n\n @property\n def invariant_property(self):\n return {\n \"skeleton_tree\": self.skeleton_tree,\n \"is_local\": self.is_local,\n \"fps\": self.fps,\n }\n\n @property\n def global_velocity(self):\n \"\"\" global velocity \"\"\"\n curr_index = self.num_joints * 4 + 3\n return self.tensor[..., curr_index:curr_index + self.num_joints * 3].reshape(*(self.tensor.shape[:-1] + (self.num_joints, 3)))\n\n @property\n def global_angular_velocity(self):\n \"\"\" global angular velocity \"\"\"\n curr_index = self.num_joints * 7 + 3\n return self.tensor[..., curr_index:curr_index + self.num_joints * 3].reshape(*(self.tensor.shape[:-1] + (self.num_joints, 3)))\n\n @property\n def fps(self):\n \"\"\" number of frames per second \"\"\"\n return self._fps\n\n @property\n def time_delta(self):\n \"\"\" time between two adjacent frames \"\"\"\n return 1.0 / self.fps\n\n @property\n def global_root_velocity(self):\n \"\"\" global root velocity \"\"\"\n return self.global_velocity[..., 0, :]\n\n @property\n def global_root_angular_velocity(self):\n \"\"\" global root angular velocity \"\"\"\n return self.global_angular_velocity[..., 0, :]\n\n @classmethod\n def from_state_vector_and_velocity(\n cls,\n skeleton_tree,\n state_vector,\n global_velocity,\n global_angular_velocity,\n is_local,\n fps,\n ):\n \"\"\"\n Construct a skeleton motion from a skeleton state vector, global velocity and angular\n velocity at each joint.\n\n :param skeleton_tree: the skeleton tree that the motion is based on \n :type skeleton_tree: SkeletonTree\n :param state_vector: the state vector from the skeleton state by `.tensor`\n :type state_vector: Tensor\n :param global_velocity: the global velocity at each joint\n :type global_velocity: Tensor\n :param global_angular_velocity: 
the global angular velocity at each joint\n :type global_angular_velocity: Tensor\n :param is_local: if the rotation ins the state vector is given in local frame\n :type is_local: boolean\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n state_shape = state_vector.shape[:-1]\n v = global_velocity.reshape(*(state_shape + (-1,)))\n av = global_angular_velocity.reshape(*(state_shape + (-1,)))\n new_state_vector = torch.cat([state_vector, v, av], axis=-1)\n return cls(\n new_state_vector,\n skeleton_tree=skeleton_tree,\n is_local=is_local,\n fps=fps,\n )\n\n @classmethod\n def from_skeleton_state(cls: Type[\"SkeletonMotion\"], skeleton_state: SkeletonState, fps: int):\n \"\"\"\n Construct a skeleton motion from a skeleton state. The velocities are estimated using second\n order guassian filter along the last axis. The skeleton state must have at least .dim >= 1\n\n :param skeleton_state: the skeleton state that the motion is based on \n :type skeleton_state: SkeletonState\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n assert (type(skeleton_state) == SkeletonState), \"expected type of {}, got {}\".format(SkeletonState, type(skeleton_state))\n global_velocity = SkeletonMotion._compute_velocity(p=skeleton_state.global_translation, time_delta=1 / fps)\n global_angular_velocity = SkeletonMotion._compute_angular_velocity(r=skeleton_state.global_rotation, time_delta=1 / fps)\n return cls.from_state_vector_and_velocity(\n skeleton_tree=skeleton_state.skeleton_tree,\n state_vector=skeleton_state.tensor,\n global_velocity=global_velocity,\n global_angular_velocity=global_angular_velocity,\n is_local=skeleton_state.is_local,\n fps=fps,\n )\n\n @staticmethod\n def _to_state_vector(rot, rt, vel, avel):\n state_shape = rot.shape[:-2]\n skeleton_state_v = SkeletonState._to_state_vector(rot, rt)\n v = vel.reshape(*(state_shape + (-1,)))\n av = avel.reshape(*(state_shape + (-1,)))\n skeleton_motion_v = torch.cat([skeleton_state_v, v, av], axis=-1)\n return skeleton_motion_v\n\n @classmethod\n def from_dict(cls: Type[\"SkeletonMotion\"], dict_repr: OrderedDict, *args, **kwargs) -> \"SkeletonMotion\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n vel = TensorUtils.from_dict(dict_repr[\"global_velocity\"], *args, **kwargs)\n avel = TensorUtils.from_dict(dict_repr[\"global_angular_velocity\"], *args, **kwargs)\n return cls(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=SkeletonTree.from_dict(dict_repr[\"skeleton_tree\"], *args, **kwargs),\n is_local=dict_repr[\"is_local\"],\n fps=dict_repr[\"fps\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict([\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"global_velocity\", tensor_to_dict(self.global_velocity)),\n (\"global_angular_velocity\", tensor_to_dict(self.global_angular_velocity)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n (\"fps\", self.fps),\n ])\n\n @classmethod\n def from_fbx(\n cls: Type[\"SkeletonMotion\"],\n fbx_file_path,\n fbx_configs,\n skeleton_tree=None,\n is_local=True,\n fps=120,\n root_joint=\"\",\n root_trans_index=0,\n *args,\n **kwargs,\n ) -> \"SkeletonMotion\":\n \"\"\"\n Construct a skeleton motion from a fbx file (TODO - generalize this). 
If the skeleton tree\n is not given, it will use the first frame of the mocap to construct the skeleton tree.\n\n :param fbx_file_path: the path of the fbx file\n :type fbx_file_path: string\n :param fbx_configs: the configuration in terms of {\"tmp_path\": ..., \"fbx_py27_path\": ...}\n :type fbx_configs: dict\n :param skeleton_tree: the optional skeleton tree that the rotation will be applied to\n :type skeleton_tree: SkeletonTree, optional\n :param is_local: the state vector uses local or global rotation as the representation\n :type is_local: bool, optional, default=True\n :rtype: SkeletonMotion\n \"\"\"\n joint_names, joint_parents, transforms, fps = fbx_to_array(fbx_file_path, fbx_configs, root_joint, fps)\n # swap the last two axis to match the convention\n local_transform = euclidean_to_transform(transformation_matrix=torch.from_numpy(np.swapaxes(np.array(transforms), -1, -2),).float())\n local_rotation = transform_rotation(local_transform)\n root_translation = transform_translation(local_transform)[..., root_trans_index, :]\n joint_parents = torch.from_numpy(np.array(joint_parents)).int()\n\n if skeleton_tree is None:\n local_translation = transform_translation(local_transform).reshape(-1, len(joint_parents), 3)[0]\n skeleton_tree = SkeletonTree(joint_names, joint_parents, local_translation)\n skeleton_state = SkeletonState.from_rotation_and_root_translation(skeleton_tree, r=local_rotation, t=root_translation, is_local=True)\n if not is_local:\n skeleton_state = skeleton_state.global_repr()\n return cls.from_skeleton_state(skeleton_state=skeleton_state, fps=fps)\n\n @staticmethod\n def _compute_velocity(p, time_delta, guassian_filter=True):\n velocity = np.gradient(p.numpy(), axis=-3) / time_delta\n if guassian_filter:\n velocity = torch.from_numpy(filters.gaussian_filter1d(velocity, 2, axis=-3, mode=\"nearest\")).to(p)\n else:\n velocity = torch.from_numpy(velocity).to(p)\n\n return velocity\n\n @staticmethod\n def _compute_angular_velocity(r, time_delta: float, guassian_filter=True):\n # assume the second last dimension is the time axis\n diff_quat_data = quat_identity_like(r).to(r)\n diff_quat_data[..., :-1, :, :] = quat_mul_norm(r[..., 1:, :, :], quat_inverse(r[..., :-1, :, :]))\n diff_angle, diff_axis = quat_angle_axis(diff_quat_data)\n angular_velocity = diff_axis * diff_angle.unsqueeze(-1) / time_delta\n if guassian_filter:\n angular_velocity = torch.from_numpy(filters.gaussian_filter1d(angular_velocity.numpy(), 2, axis=-3, mode=\"nearest\"),)\n return angular_velocity\n\n def crop(self, start: int, end: int, fps: Optional[int] = None):\n \"\"\"\n Crop the motion along its last axis. This is equivalent to performing a slicing on the\n object with [..., start: end: skip_every] where skip_every = old_fps / fps. Note that the\n new fps provided must be a factor of the original fps. 
\n\n :param start: the beginning frame index\n :type start: int\n :param end: the ending frame index\n :type end: int\n :param fps: number of frames per second in the output (if not given the original fps will be used)\n :type fps: int, optional\n :rtype: SkeletonMotion\n \"\"\"\n if fps is None:\n new_fps = int(self.fps)\n old_fps = int(self.fps)\n else:\n new_fps = int(fps)\n old_fps = int(self.fps)\n assert old_fps % fps == 0, (\"the resampling doesn't support fps with non-integer division \"\n \"from the original fps: {} => {}\".format(old_fps, fps))\n skip_every = old_fps // new_fps\n s = slice(start, end, skip_every)\n z = self[..., s]\n\n rot = z.local_rotation if z.is_local else z.global_rotation\n rt = z.root_translation\n vel = z.global_velocity\n avel = z.global_angular_velocity\n return SkeletonMotion(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=z.skeleton_tree,\n is_local=z.is_local,\n fps=new_fps,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: \"SkeletonTree\",\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return SkeletonMotion.from_skeleton_state(\n super().retarget_to(\n joint_mapping,\n source_tpose_local_rotation,\n source_tpose_root_translation,\n target_skeleton_tree,\n target_tpose_local_rotation,\n target_tpose_root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n ),\n self.fps,\n )\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n )" }, { "identifier": "SkeletonState", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonState(Serializable):\n \"\"\"\n A skeleton state contains all the information needed to describe a static state of a skeleton.\n It requires a skeleton tree, local/global rotation at each joint and the root translation.\n\n Example:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> zero_pose = SkeletonState.zero_pose(t)\n >>> plot_skeleton_state(zero_pose) # can be imported from `.visualization.common`\n [plot of the ant at zero pose\n >>> local_rotation = zero_pose.local_rotation.clone()\n >>> local_rotation[2] = torch.tensor([0, 0, 1, 0])\n >>> new_pose = SkeletonState.from_rotation_and_root_translation(\n ... skeleton_tree=t,\n ... r=local_rotation,\n ... t=zero_pose.root_translation,\n ... is_local=True\n ... 
)\n >>> new_pose.local_rotation\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n >>> plot_skeleton_state(new_pose) # you should be able to see one of ant's leg is bent\n [plot of the ant with the new pose\n >>> new_pose.global_rotation # the local rotation is propagated to the global rotation at joint #3\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n\n Global/Local Representation (cont. from the previous example)\n >>> new_pose.is_local\n True\n >>> new_pose.tensor # this will return the local rotation followed by the root translation\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.tensor.shape # 4 * 13 (joint rotation) + 3 (root translatio\n torch.Size([55])\n >>> new_pose.global_repr().is_local\n False\n >>> new_pose.global_repr().tensor # this will return the global rotation followed by the root translation instead\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.global_repr().tensor.shape # 4 * 13 (joint rotation) + 3 (root translation\n torch.Size([55])\n \"\"\"\n\n def __init__(self, tensor_backend, skeleton_tree, is_local):\n self._skeleton_tree = skeleton_tree\n self._is_local = is_local\n self.tensor = tensor_backend.clone()\n\n def __len__(self):\n return self.tensor.shape[0]\n\n @property\n def rotation(self):\n if not hasattr(self, \"_rotation\"):\n self._rotation = self.tensor[..., :self.num_joints * 4].reshape(*(self.tensor.shape[:-1] + (self.num_joints, 4)))\n return self._rotation\n\n @property\n def _local_rotation(self):\n if self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def _global_rotation(self):\n if not self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def is_local(self):\n \"\"\" is the rotation represented in local frame? 
\n \n :rtype: bool\n \"\"\"\n return self._is_local\n\n @property\n def invariant_property(self):\n return {\"skeleton_tree\": self.skeleton_tree, \"is_local\": self.is_local}\n\n @property\n def num_joints(self):\n \"\"\" number of joints in the skeleton tree \n \n :rtype: int\n \"\"\"\n return self.skeleton_tree.num_joints\n\n @property\n def skeleton_tree(self):\n \"\"\" skeleton tree \n \n :rtype: SkeletonTree\n \"\"\"\n return self._skeleton_tree\n\n @property\n def root_translation(self):\n \"\"\" root translation \n \n :rtype: Tensor\n \"\"\"\n if not hasattr(self, \"_root_translation\"):\n self._root_translation = self.tensor[..., self.num_joints * 4:self.num_joints * 4 + 3]\n return self._root_translation\n\n @property\n def global_transformation(self):\n \"\"\" global transformation of each joint (transform from joint frame to global frame) \"\"\"\n # Forward Kinematics\n if not hasattr(self, \"_global_transformation\"):\n local_transformation = self.local_transformation\n global_transformation = []\n parent_indices = self.skeleton_tree.parent_indices.numpy()\n # global_transformation = local_transformation.identity_like()\n for node_index in range(len(self.skeleton_tree)):\n parent_index = parent_indices[node_index]\n if parent_index == -1:\n global_transformation.append(local_transformation[..., node_index, :])\n else:\n global_transformation.append(transform_mul(\n global_transformation[parent_index],\n local_transformation[..., node_index, :],\n ))\n self._global_transformation = torch.stack(global_transformation, axis=-2)\n return self._global_transformation\n\n @property\n def global_rotation(self):\n \"\"\" global rotation of each joint (rotation matrix to rotate from joint's F.O.R to global\n F.O.R) \"\"\"\n if self._global_rotation is None:\n if not hasattr(self, \"_comp_global_rotation\"):\n self._comp_global_rotation = transform_rotation(self.global_transformation)\n return self._comp_global_rotation\n else:\n return self._global_rotation\n\n @property\n def global_translation(self):\n \"\"\" global translation of each joint \"\"\"\n if not hasattr(self, \"_global_translation\"):\n self._global_translation = transform_translation(self.global_transformation)\n return self._global_translation\n\n @property\n def global_translation_xy(self):\n \"\"\" global translation in xy \"\"\"\n trans_xy_data = self.global_translation.zeros_like()\n trans_xy_data[..., 0:2] = self.global_translation[..., 0:2]\n return trans_xy_data\n\n @property\n def global_translation_xz(self):\n \"\"\" global translation in xz \"\"\"\n trans_xz_data = self.global_translation.zeros_like()\n trans_xz_data[..., 0:1] = self.global_translation[..., 0:1]\n trans_xz_data[..., 2:3] = self.global_translation[..., 2:3]\n return trans_xz_data\n\n @property\n def local_rotation(self):\n \"\"\" the rotation from child frame to parent frame given in the order of child nodes appeared\n in `.skeleton_tree.node_names` \"\"\"\n if self._local_rotation is None:\n if not hasattr(self, \"_comp_local_rotation\"):\n local_rotation = quat_identity_like(self.global_rotation)\n for node_index in range(len(self.skeleton_tree)):\n parent_index = self.skeleton_tree.parent_indices[node_index]\n if parent_index == -1:\n local_rotation[..., node_index, :] = self.global_rotation[..., node_index, :]\n else:\n local_rotation[..., node_index, :] = quat_mul_norm(\n quat_inverse(self.global_rotation[..., parent_index, :]),\n self.global_rotation[..., node_index, :],\n )\n self._comp_local_rotation = local_rotation\n return 
self._comp_local_rotation\n else:\n return self._local_rotation\n\n @property\n def local_transformation(self):\n \"\"\" local translation + local rotation. It describes the transformation from child frame to \n parent frame given in the order of child nodes appeared in `.skeleton_tree.node_names` \"\"\"\n if not hasattr(self, \"_local_transformation\"):\n self._local_transformation = transform_from_rotation_translation(r=self.local_rotation, t=self.local_translation)\n return self._local_transformation\n\n @property\n def local_translation(self):\n \"\"\" local translation of the skeleton state. It is identical to the local translation in\n `.skeleton_tree.local_translation` except the root translation. The root translation is\n identical to `.root_translation` \"\"\"\n if not hasattr(self, \"_local_translation\"):\n broadcast_shape = (tuple(self.tensor.shape[:-1]) + (len(self.skeleton_tree),) + tuple(self.skeleton_tree.local_translation.shape[-1:]))\n local_translation = self.skeleton_tree.local_translation.broadcast_to(*broadcast_shape).clone()\n local_translation[..., 0, :] = self.root_translation\n self._local_translation = local_translation\n return self._local_translation\n\n # Root Properties\n @property\n def root_translation_xy(self):\n \"\"\" root translation on xy \"\"\"\n if not hasattr(self, \"_root_translation_xy\"):\n self._root_translation_xy = self.global_translation_xy[..., 0, :]\n return self._root_translation_xy\n\n @property\n def global_root_rotation(self):\n \"\"\" root rotation \"\"\"\n if not hasattr(self, \"_global_root_rotation\"):\n self._global_root_rotation = self.global_rotation[..., 0, :]\n return self._global_root_rotation\n\n @property\n def global_root_yaw_rotation(self):\n \"\"\" root yaw rotation \"\"\"\n if not hasattr(self, \"_global_root_yaw_rotation\"):\n self._global_root_yaw_rotation = self.global_root_rotation.yaw_rotation()\n return self._global_root_yaw_rotation\n\n # Properties relative to root\n @property\n def local_translation_to_root(self):\n \"\"\" The 3D translation from joint frame to the root frame. \"\"\"\n if not hasattr(self, \"_local_translation_to_root\"):\n self._local_translation_to_root = (self.global_translation - self.root_translation.unsqueeze(-1))\n return self._local_translation_to_root\n\n @property\n def local_rotation_to_root(self):\n \"\"\" The 3D rotation from joint frame to the root frame. 
It is equivalent to \n The root_R_world * world_R_node \"\"\"\n return (quat_inverse(self.global_root_rotation).unsqueeze(-1) * self.global_rotation)\n\n def compute_forward_vector(\n self,\n left_shoulder_index,\n right_shoulder_index,\n left_hip_index,\n right_hip_index,\n gaussian_filter_width=20,\n ):\n \"\"\" Computes forward vector based on cross product of the up vector with \n average of the right->left shoulder and hip vectors \"\"\"\n global_positions = self.global_translation\n # Perpendicular to the forward direction.\n # Uses the shoulders and hips to find this.\n side_direction = (global_positions[:, left_shoulder_index].numpy() - global_positions[:, right_shoulder_index].numpy() + global_positions[:, left_hip_index].numpy() - global_positions[:, right_hip_index].numpy())\n side_direction = (side_direction / np.sqrt((side_direction**2).sum(axis=-1))[..., np.newaxis])\n\n # Forward direction obtained by crossing with the up direction.\n forward_direction = np.cross(side_direction, np.array([[0, 1, 0]]))\n\n # Smooth the forward direction with a Gaussian.\n # Axis 0 is the time/frame axis.\n forward_direction = filters.gaussian_filter1d(forward_direction, gaussian_filter_width, axis=0, mode=\"nearest\")\n forward_direction = (forward_direction / np.sqrt((forward_direction**2).sum(axis=-1))[..., np.newaxis])\n\n return torch.from_numpy(forward_direction)\n\n @staticmethod\n def _to_state_vector(rot, rt):\n state_shape = rot.shape[:-2]\n vr = rot.reshape(*(state_shape + (-1,)))\n vt = rt.broadcast_to(*state_shape + rt.shape[-1:]).reshape(*(state_shape + (-1,)))\n v = torch.cat([vr, vt], axis=-1)\n return v\n\n @classmethod\n def from_dict(cls: Type[\"SkeletonState\"], dict_repr: OrderedDict, *args, **kwargs) -> \"SkeletonState\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n return cls(\n SkeletonState._to_state_vector(rot, rt),\n SkeletonTree.from_dict(dict_repr[\"skeleton_tree\"], *args, **kwargs),\n dict_repr[\"is_local\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict([\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n ])\n\n @classmethod\n def from_rotation_and_root_translation(cls, skeleton_tree, r, t, is_local=True):\n \"\"\"\n Construct a skeleton state from rotation and root translation\n\n :param skeleton_tree: the skeleton tree\n :type skeleton_tree: SkeletonTree\n :param r: rotation (either global or local)\n :type r: Tensor\n :param t: root translation\n :type t: Tensor\n :param is_local: to indicate that whether the rotation is local or global\n :type is_local: bool, optional, default=True\n \"\"\"\n assert (r.dim() > 0), \"the rotation needs to have at least 1 dimension (dim = {})\".format(r.dim)\n state_vec = SkeletonState._to_state_vector(r, t)\n\n return cls(\n state_vec,\n skeleton_tree=skeleton_tree,\n is_local=is_local,\n )\n\n @classmethod\n def zero_pose(cls, skeleton_tree):\n \"\"\"\n Construct a zero-pose skeleton state from the skeleton tree by assuming that all the local\n rotation is 0 and root translation is also 0.\n\n :param skeleton_tree: the skeleton tree as the rigid body\n :type skeleton_tree: SkeletonTree\n \"\"\"\n return cls.from_rotation_and_root_translation(\n skeleton_tree=skeleton_tree,\n r=quat_identity([skeleton_tree.num_joints]),\n t=torch.zeros(3, 
dtype=skeleton_tree.local_translation.dtype),\n is_local=True,\n )\n\n def local_repr(self):\n \"\"\" \n Convert the skeleton state into local representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=True`. This method will do nothing. \n\n :rtype: SkeletonState\n \"\"\"\n if self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def global_repr(self):\n \"\"\" \n Convert the skeleton state into global representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=False`. This method will do nothing. \n\n :rtype: SkeletonState\n \"\"\"\n if not self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.global_rotation,\n t=self.root_translation,\n is_local=False,\n )\n\n def _get_pairwise_average_translation(self):\n global_transform_inv = transform_inverse(self.global_transformation)\n p1 = global_transform_inv.unsqueeze(-2)\n p2 = self.global_transformation.unsqueeze(-3)\n\n pairwise_translation = (transform_translation(transform_mul(p1, p2)).reshape(-1, len(self.skeleton_tree), len(self.skeleton_tree), 3).mean(axis=0))\n return pairwise_translation\n\n def _transfer_to(self, new_skeleton_tree: SkeletonTree):\n old_indices = list(map(self.skeleton_tree.index, new_skeleton_tree))\n return SkeletonState.from_rotation_and_root_translation(\n new_skeleton_tree,\n r=self.global_rotation[..., old_indices, :],\n t=self.root_translation,\n is_local=False,\n )\n\n def drop_nodes_by_names(self, node_names: List[str], estimate_local_translation_from_states: bool = True) -> \"SkeletonState\":\n \"\"\" \n Drop a list of nodes from the skeleton and re-compute the local rotation to match the \n original joint position as much as possible. \n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n if estimate_local_translation_from_states:\n pairwise_translation = self._get_pairwise_average_translation()\n else:\n pairwise_translation = None\n new_skeleton_tree = self.skeleton_tree.drop_nodes_by_names(node_names, pairwise_translation)\n return self._transfer_to(new_skeleton_tree)\n\n def keep_nodes_by_names(self, node_names: List[str], estimate_local_translation_from_states: bool = True) -> \"SkeletonState\":\n \"\"\" \n Keep a list of nodes and drop all other nodes from the skeleton and re-compute the local \n rotation to match the original joint position as much as possible. 
\n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n return self.drop_nodes_by_names(\n list(filter(lambda x: (x not in node_names), self)),\n estimate_local_translation_from_states,\n )\n\n def _remapped_to(self, joint_mapping: Dict[str, str], target_skeleton_tree: SkeletonTree):\n joint_mapping_inv = {target: source for source, target in joint_mapping.items()}\n reduced_target_skeleton_tree = target_skeleton_tree.keep_nodes_by_names(list(joint_mapping_inv))\n n_joints = (\n len(joint_mapping),\n len(self.skeleton_tree),\n len(reduced_target_skeleton_tree),\n )\n assert (len(set(n_joints)) == 1), \"the joint mapping is not consistent with the skeleton trees\"\n source_indices = list(map(\n lambda x: self.skeleton_tree.index(joint_mapping_inv[x]),\n reduced_target_skeleton_tree,\n ))\n target_local_rotation = self.local_rotation[..., source_indices, :]\n return SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=reduced_target_skeleton_tree,\n r=target_local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: SkeletonTree,\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. The function follows the procedures below.\n\n Steps:\n 1. Drop the joints from the source (self) that do not belong to the joint mapping\\\n with an implementation that is similar to \"keep_nodes_by_names()\" - take a\\\n look at the function doc for more details (same for source_tpose)\n \n 2. Rotate the source state and the source tpose by \"rotation_to_target_skeleton\"\\\n to align the source with the target orientation\n \n 3. Extract the root translation and normalize it to match the scale of the target\\\n skeleton\n \n 4. Extract the global rotation from source state relative to source tpose and\\\n re-apply the relative rotation to the target tpose to construct the global\\\n rotation after retargetting\n \n 5. Combine the computed global rotation and the root translation from 3 and 4 to\\\n complete the retargeting.\n \n 6. 
Make feet on the ground (global translation z)\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n\n # STEP 0: Preprocess\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=self.skeleton_tree,\n r=source_tpose_local_rotation,\n t=source_tpose_root_translation,\n is_local=True,\n )\n target_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=target_tpose_local_rotation,\n t=target_tpose_root_translation,\n is_local=True,\n )\n\n # STEP 1: Drop the irrelevant joints\n pairwise_translation = self._get_pairwise_average_translation()\n node_names = list(joint_mapping)\n new_skeleton_tree = self.skeleton_tree.keep_nodes_by_names(node_names, pairwise_translation)\n\n # TODO: combine the following steps before STEP 3\n source_tpose = source_tpose._transfer_to(new_skeleton_tree)\n source_state = self._transfer_to(new_skeleton_tree)\n\n source_tpose = source_tpose._remapped_to(joint_mapping, target_skeleton_tree)\n source_state = source_state._remapped_to(joint_mapping, target_skeleton_tree)\n\n # STEP 2: Rotate the source to align with the target\n new_local_rotation = source_tpose.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(rotation_to_target_skeleton, source_tpose.local_rotation[..., 0, :])\n\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_tpose.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_tpose.root_translation),\n is_local=True,\n )\n\n new_local_rotation = source_state.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(rotation_to_target_skeleton, source_state.local_rotation[..., 0, :])\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_state.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_state.root_translation),\n is_local=True,\n )\n\n # STEP 3: Normalize to match the target scale\n root_translation_diff = (source_state.root_translation - source_tpose.root_translation) * scale_to_target_skeleton\n\n # STEP 4: the global rotation from source state relative to source tpose 
and\n # re-apply to the target\n current_skeleton_tree = source_state.skeleton_tree\n target_tpose_global_rotation = source_state.global_rotation[0, :].clone()\n for current_index, name in enumerate(current_skeleton_tree):\n if name in target_tpose.skeleton_tree:\n target_tpose_global_rotation[current_index, :] = target_tpose.global_rotation[target_tpose.skeleton_tree.index(name), :]\n\n global_rotation_diff = quat_mul_norm(source_state.global_rotation, quat_inverse(source_tpose.global_rotation))\n new_global_rotation = quat_mul_norm(global_rotation_diff, target_tpose_global_rotation)\n\n # STEP 5: Putting 3 and 4 together\n current_skeleton_tree = source_state.skeleton_tree\n shape = source_state.global_rotation.shape[:-1]\n shape = shape[:-1] + target_tpose.global_rotation.shape[-2:-1]\n new_global_rotation_output = quat_identity(shape)\n for current_index, name in enumerate(target_skeleton_tree):\n while name not in current_skeleton_tree:\n name = target_skeleton_tree.parent_of(name)\n parent_index = current_skeleton_tree.index(name)\n new_global_rotation_output[:, current_index, :] = new_global_rotation[:, parent_index, :]\n\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=new_global_rotation_output,\n t=target_tpose.root_translation + root_translation_diff,\n is_local=False,\n ).local_repr()\n\n return source_state\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. See the method `retarget_to()` for more information\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n assert (len(source_tpose.shape) == 0 and len(target_tpose.shape) == 0), \"the retargeting script currently doesn't support vectorized operations\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n )" } ]
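The context snippets above document the poselib skeleton API (SkeletonTree, SkeletonState, SkeletonMotion) and the SMPL parsers that the target file imports. A minimal usage sketch, assuming poselib is installed and using a hypothetical MJCF path that is not part of this record:

from poselib.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonState

# Build a skeleton from an MJCF file and its zero pose (identity local rotations, zero root translation).
skeleton_tree = SkeletonTree.from_mjcf("assets/humanoid.xml")  # hypothetical path, for illustration only
zero_pose = SkeletonState.zero_pose(skeleton_tree)
# Forward kinematics of the rest pose gives one 3D position per joint.
print(zero_pose.global_translation.shape)  # (num_joints, 3)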
import glob
import os
import sys
import pdb
import os.path as osp
import open3d as o3d
import open3d.visualization.rendering as rendering
import imageio
import joblib
import numpy as np
import torch
import random
import matplotlib.pyplot as plt
import cv2
import matplotlib as mpl
from tqdm import tqdm
from uhc.smpllib.smpl_parser import (
    SMPL_Parser,
    SMPLH_Parser,
    SMPLX_Parser,
)
from uhc.smpllib.smpl_mujoco import SMPL_BONE_ORDER_NAMES as joint_names
from poselib.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState
from scipy.spatial.transform import Rotation as sRot
from tqdm import tqdm
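Among these imports, scipy's Rotation (aliased sRot) is commonly used to move between SMPL axis-angle poses and quaternions. A small self-contained sketch of that conversion; the 24-joint shape is the standard SMPL layout and is stated here as an assumption, not read from this record:

import numpy as np
from scipy.spatial.transform import Rotation as sRot

pose_aa = np.zeros((24, 3))                      # assumed SMPL body pose, one axis-angle vector per joint
pose_quat = sRot.from_rotvec(pose_aa).as_quat()  # (24, 4) quaternions in (x, y, z, w) order
print(pose_quat[0])                              # identity rotation -> [0. 0. 0. 1.]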
19852
sys.path.append(os.getcwd())

paused, reset, recording, image_list, writer, control, curr_zoom = False, False, False, [], None, None, 0.01


def pause_func(action):
    global paused
    paused = not paused
    print(f"Paused: {paused}")
    return True


def reset_func(action):
    global reset
    reset = not reset
    print(f"Reset: {reset}")
    return True


def record_func(action):
    global recording, writer
    if not recording:
        fps = 30
        curr_video_file_name = "test.mp4"
        writer = imageio.get_writer(curr_video_file_name, fps=fps, macro_block_size=None)
    elif not writer is None:
        writer.close()
        writer = None
    recording = not recording
    print(f"Recording: {recording}")
    return True


def capture_func(action):
    global capture
    capture = not capture
    return True


def zoom_func(action):
    global control, curr_zoom
    curr_zoom = curr_zoom * 0.9
    control.set_zoom(curr_zoom)
    print(f"Reset: {reset}")
    return True


mujoco_joint_names = ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']

Name = "getting_started"
Title = "Getting Started"

data_dir = "data/smpl"
smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral")
smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male")
smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female")

# pkl_dir = "output/renderings/smpl_ego_long_8-2023-01-20-11:28:00.pkl"
# pkl_dir = "output/renderings/smpl_im_comp_8-2023-02-05-15:36:14.pkl"
pkl_dir = "output/renderings/smpl_im_comp_pnn_3-2023-03-07-14:31:50.pkl"
Name = pkl_dir.split("/")[-1].split(".")[0]
pkl_data = joblib.load(pkl_dir)
sys.path.append(os.getcwd())

paused, reset, recording, image_list, writer, control, curr_zoom = False, False, False, [], None, None, 0.01


def pause_func(action):
    global paused
    paused = not paused
    print(f"Paused: {paused}")
    return True


def reset_func(action):
    global reset
    reset = not reset
    print(f"Reset: {reset}")
    return True


def record_func(action):
    global recording, writer
    if not recording:
        fps = 30
        curr_video_file_name = "test.mp4"
        writer = imageio.get_writer(curr_video_file_name, fps=fps, macro_block_size=None)
    elif not writer is None:
        writer.close()
        writer = None
    recording = not recording
    print(f"Recording: {recording}")
    return True


def capture_func(action):
    global capture
    capture = not capture
    return True


def zoom_func(action):
    global control, curr_zoom
    curr_zoom = curr_zoom * 0.9
    control.set_zoom(curr_zoom)
    print(f"Reset: {reset}")
    return True


mujoco_joint_names = ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']

Name = "getting_started"
Title = "Getting Started"

data_dir = "data/smpl"
smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral")
smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male")
smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female")

# pkl_dir = "output/renderings/smpl_ego_long_8-2023-01-20-11:28:00.pkl"
# pkl_dir = "output/renderings/smpl_im_comp_8-2023-02-05-15:36:14.pkl"
pkl_dir = "output/renderings/smpl_im_comp_pnn_3-2023-03-07-14:31:50.pkl"
Name = pkl_dir.split("/")[-1].split(".")[0]
pkl_data = joblib.load(pkl_dir)
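The script above builds SMPL_Parser instances for three genders before loading the rendering pickle. A minimal sketch of querying such a parser, assuming the SMPL model files exist under data/smpl and using the get_joints_verts call shown in the context snippets; the pose and shape sizes below are the standard SMPL ones and are assumptions, not values taken from this record:

import torch
from uhc.smpllib.smpl_parser import SMPL_Parser

smpl_parser_n = SMPL_Parser(model_path="data/smpl", gender="neutral")
pose_aa = torch.zeros(1, 72)   # assumed SMPL pose: 24 joints * 3 axis-angle values
betas = torch.zeros(1, 10)     # assumed SMPL shape coefficients
verts, joints = smpl_parser_n.get_joints_verts(pose_aa, th_betas=betas)
print(verts.shape, joints.shape)  # vertices are typically (1, 6890, 3) for SMPL; joint count depends on the model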
mujoco_2_smpl = [mujoco_joint_names.index(q) for q in joint_names if q in mujoco_joint_names]
0
2023-10-15 19:05:47+00:00
24k
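The gold next_line of this record builds an index list that maps joints from the MuJoCo ordering into the SMPL bone ordering. A self-contained sketch of the same pattern on a shortened, made-up joint list, showing how the resulting indices reorder per-joint data:

import numpy as np

mujoco_names = ['Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'Head']  # made-up, shortened ordering
smpl_names = ['Pelvis', 'L_Hip', 'R_Hip', 'Head', 'Torso']    # same joints listed in a different order

# Same pattern as the record's next_line: for each SMPL-ordered joint, look up its MuJoCo index.
mujoco_2_smpl = [mujoco_names.index(q) for q in smpl_names if q in mujoco_names]
print(mujoco_2_smpl)  # [0, 1, 2, 4, 3]

joint_data = np.arange(len(mujoco_names) * 3).reshape(-1, 3)  # fake per-joint 3D data in MuJoCo order
print(joint_data[mujoco_2_smpl])                              # same rows, reordered to SMPL order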
e4s2023/E4S2023
face_swap_video_pipeline.py
[ { "identifier": "OurSwapFacePipelineOptions", "path": "options/our_swap_face_pipeline_options.py", "snippet": "class OurSwapFacePipelineOptions:\n\n\tdef __init__(self):\n\t\tself.parser = ArgumentParser()\n\t\tself.initialize()\n\n\tdef initialize(self):\n\t\tself.parser.add_argument('--exp_dir', type=str, default=\"./tmp_exp\", help='Path to experiment output directory')\n\t\tself.parser.add_argument('--num_seg_cls', type=int, default=12,help='Segmentation mask class number')\n\t\tself.parser.add_argument('--source_frame_name', type=str, default=\"28494\", help='source frame number')\n\t\tself.parser.add_argument('--target_video_name', type=str, default=\"874\",help='target video name')\n # ================= 模型设置 相关 =====================\n\t\tself.parser.add_argument('--out_size', type=int, default=1024, help='output image size') \n\t\tself.parser.add_argument('--fsencoder_type', type=str, default=\"psp\", help='FS Encode网络类型') \n\t\tself.parser.add_argument('--remaining_layer_idx', type=int, default=13, help='剩余的几层不用mask')\n\t\tself.parser.add_argument('--outer_dilation', type=int, default=15, help='dilation 的宽度')\n\t\tself.parser.add_argument('--erode_radius', type=int, default=3, help='erode 的宽度')\n \n # ================= 数据集 相关 =====================\n\t\tself.parser.add_argument('--batch_size', default=1, type=int, help='Batch size for training')\n\t\tself.parser.add_argument('--workers', default=4, type=int, help='Number of train dataloader workers')\n\t\tself.parser.add_argument('--target_images_dir', default='/apdcephfs/share_1290939/zhianliu/py_projects/our_editing_swappingFace_video/01_2_face', type=str)\n\t\tself.parser.add_argument('--driven_images_dir', default='/apdcephfs/share_1290939/zhianliu/py_projects/our_editing_swappingFace_video/29698_to_01/driven', type=str)\n\t\tself.parser.add_argument('--UI_edit_masks_dir', default='/apdcephfs/share_1290939/zhianliu/py_projects/our_editing_swappingFace_video/29698_to_01/edit_mask', type=str)\n\t\tself.parser.add_argument('--swapped_style_vectors_dir', default='/apdcephfs/share_1290939/zhianliu/py_projects/our_editing_swappingFace_video/29698_to_01/FFHQ_model_video_swap_styleVec', type=str)\n\n # ================= 训练 相关 =====================\n\t\tself.parser.add_argument('--train_G', default=True, type=bool, help='Whether to train the model')\n\t\tself.parser.add_argument('--pti_learning_rate', default=1e-3, type=float, help='PTI learning rate')\n\t\tself.parser.add_argument('--stiching_learning_rate', default=1e-2, type=float, help='Stiching learning rate')\n\t\tself.parser.add_argument('--optim_name', default='adam', type=str, help='Which optimizer to use') \n\t\tself.parser.add_argument('--max_pti_steps', default=0, type=int, help='PTI finetune steps')\n\t\tself.parser.add_argument('--max_stiching_steps', default=100, type=int, help='Stiching finetune steps') \n\t\tself.parser.add_argument('--device', default='cuda:0', type=str, help='Which GPU(s) to use')\n \n # ================= Loss 相关 =====================\n\t\tself.parser.add_argument('--lpips_lambda', default=0.8, type=float, help='LPIPS loss multiplier factor')\n\t\tself.parser.add_argument('--id_lambda', default=0.1, type=float, help='ID loss multiplier factor')\n\t\tself.parser.add_argument('--id_loss_multiscale', default=True, type=bool, help='Whether to apply multi scale in ID loss') \n\t\tself.parser.add_argument('--face_parsing_lambda', default=0.1, type=float, help='Face parsing loss multiplier factor')\n\t\tself.parser.add_argument('--l2_lambda', default=1.0, 
type=float, help='L2 loss multiplier factor')\n\t\tself.parser.add_argument('--recolor_lambda', default=5.0, type=float, help='Recolor reg loss multiplier factor')\n \n # ================== 预训练模型 相关 ==================\n\t\tself.parser.add_argument('--learn_in_w', action='store_true', help='Whether to learn in w space instead of w+')\n # 是否从styleGAN的均值开始学习\n\t\tself.parser.add_argument('--start_from_latent_avg', action='store_true',default=True, help='Whether to add average latent vector to generate codes from encoder.')\n # styleGAN输出图片大小\n\t\tself.parser.add_argument('--output_size', default=1024, type=int, help='Output size of generator')\n\t\tself.parser.add_argument('--n_styles', default=18, type=int, help='StyleGAN层数')\n \n # ir_se50 预训练权重, for id_loss\n\t\t# self.parser.add_argument('--ir_se50_path', default='/apdcephfs/share_1290939/zhianliu/pretrained_models/pixel2style2pixel/model_ir_se50.pth', type=str, help='Path to ir_se50 model weights')\n\t\tself.parser.add_argument('--ir_se50_path',\n\t\t\t\t\t\t\t\t default='./pretrained/pixel2style2pixel/model_ir_se50.pth',\n\t\t\t\t\t\t\t\t type=str, help='Path to ir_se50 model weights')\n\t\t# self.parser.add_argument('--face_parsing_model_path', default='/apdcephfs/share_1290939/zhianliu/pretrained_models/CelebA-Mask-HQ-faceParser/model.pth', type=str, help='Path to face parsing model weights')\n\t\tself.parser.add_argument('--face_parsing_model_path',\n\t\t\t\t\t\t\t\t default='./pretrained/faceseg/model.pth',\n\t\t\t\t\t\t\t\t type=str, help='Path to face parsing model weights')\n\t\t# self.parser.add_argument('--checkpoint_path', default='/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/v_15_hybrid_stage1_seg12_finetuneGD_8A100_pspHyperParas_remainLyrIdx13_flip_FFHQ_300KIters/checkpoints/iteration_300000_belowPyTorch1_6.pt', type=str, help='Path to model checkpoint')\n\t\t# self.parser.add_argument('--checkpoint_path', default='/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/v_15_hybrid_stage1_seg12_finetuneGD_8A100_pspHyperParas_remainLyrIdx13_flip_FFHQ_300KIters/checkpoints/iteration_300000.pt', type=str, help='Path to model checkpoint')\n\t\tself.parser.add_argument('--checkpoint_path', default='./pretrained/E4S/iteration_300000.pt', type=str, help='Path to model checkpoint')\n\t\tself.parser.add_argument('--PTI_checkpoint_path', default='/apdcephfs/share_1290939/zhianliu/py_projects/pytorch-DDP-demo/work_dirs/v_18_video_swapping/musk_to_874/finetuned_G_lr1e3_iters150_erode.pth', type=str, help='Path to PTI finetuned model checkpoint')\n\t\t# self.parser.add_argument('--checkpoint_path', default='/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/v_15_hybrid_stage1_seg12_finetuneGD_8A100_pspHyperParas_remainLyrIdx13_flip_200KIters/checkpoints/iteration_120000.pt', type=str, help='Path to model checkpoint')\n\n\t\t\n\tdef parse(self):\n\t\topts = self.parser.parse_args()\n\t\treturn opts" }, { "identifier": "torch_utils", "path": "utils/torch_utils.py", "snippet": "def saveTensorToFile(tensor, save_path):\ndef interpolate(img, size):\ndef readImgAsTensor(img_path, gray=False, to_tensor=True, size=1024):\ndef featMap2im(var):\ndef tensor2im(var, is_zero_center: bool = True, ):\ndef im2tensor(var, add_c_dim: bool = False, norm: bool = True, std: bool = False):\ndef tensor2map(var,shown_mask_indices=None):\ndef vis_mask_in_color(mask):\ndef get_colors():\ndef vis_faces(log_hooks1):\ndef vis_faces_no_id(hooks_dict1, fig, gs, i):\ndef 
aggregate_loss_dict(agg_loss_dict):\ndef labelMap2OneHot(label, num_cls):\ndef remove_module_prefix(state_dict,prefix):\ndef requires_grad(model, flag=True):\ndef accumulate(model1, model2, decay=0.999):\n C, H, W = tensor.size()" }, { "identifier": "Net3", "path": "models/networks.py", "snippet": "class Net3(nn.Module):\n \"\"\" FSEncoder + styleGAN2 \"\"\"\n\n def __init__(self,opts,):\n super(Net3, self).__init__()\n self.opts=opts\n assert self.opts.fsencoder_type in [\"psp\",\"sean\"]\n if self.opts.fsencoder_type==\"psp\":\n self.encoder = FSEncoder_PSP(mode='ir_se', opts=self.opts)\n dim_s_code = 256 + 512 + 512\n else:\n self.encoder = FSEncoder_SEAN(input_nc=3, output_nc=512,in_size = 256)\n dim_s_code = 512\n \n self.split_layer_idx = 5\n self.remaining_layer_idx = self.opts.remaining_layer_idx\n \n # 区分component 的 W+ space 的 MLPs\n self.MLPs = nn.ModuleList()\n for i in range(self.opts.num_seg_cls):\n self.MLPs.append(\n LocalMLP(\n dim_component=dim_s_code,\n dim_style=512,\n num_w_layers= self.remaining_layer_idx if self.remaining_layer_idx != 17 else 18\n )\n )\n \n self.G = Generator(size=self.opts.out_size, style_dim=512, n_mlp=8, split_layer_idx = self.split_layer_idx, remaining_layer_idx = self.remaining_layer_idx)\n\n # styleGAN的参数是否更新\n if not self.opts.train_G:\n for param in self.G.parameters():\n param.requires_grad = False\n # 注意,styleGAN的8层FC是永远不更新的\n else:\n for param in self.G.style.parameters():\n param.requires_grad = False\n \n # styleGAN的倒数几层不更新 (包括convs 和 ToRGBs)\n if self.remaining_layer_idx != 17:\n for param in self.G.convs[-(17-self.remaining_layer_idx):].parameters():\n param.requires_grad = False\n for param in self.G.to_rgbs[-(17-self.remaining_layer_idx)//2 - 1:].parameters():\n param.requires_grad = False\n \n \n def forward(self, img,mask, resize=False, randomize_noise=True,return_latents=False):\n \"\"\"输入一张RGB图和对应的mask,\n (1) encoder 得到对应的F/S空间的特征,\n (2) 再送到styleGAN得到一张输出的图片\n\n Args:\n img (Tensor): 一对RGB图, each with shape [bs,3,1024,1024]\n mask ([type]): 一对RGB图对应的mask图, each with shape [bs,#seg_cls,1024,1024]\n resize (bool, optional): G生成的图片是否 resize. Defaults to True.\n randomize_noise (bool, optional): 是否加入随机噪声. Defaults to True.\n return_latents (bool, optional): 是否返回style codes. 
Defaults to False.\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n codes=[]\n bs, num_comp = codes_vector.size(0), codes_vector.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](codes_vector[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 13, 512]\n \n \n # # 剩下的几层不用分component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](codes_vector.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # 为了保持接口统一,将后3层的 style code 也扩展出一个 #seg_cls 维度\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n # 1. 完全使用 style code i.e., G(w)\n images1, result_latent, structure_feats_GT = self.G([codes], structure_feats, mask, input_is_latent=True,\n randomize_noise=randomize_noise,return_latents=return_latents,\n use_structure_code=False)\n \n \n # # 2. 使用 style code 和 strcture code i.e., G(w,F)\n # images2, _ , _ = self.G([codes], structure_feats, mask, input_is_latent=True,\n # randomize_noise=randomize_noise,return_latents=return_latents,\n # use_structure_code=True)\n \n if return_latents:\n return images1, structure_feats_GT, result_latent\n else:\n return images1, structure_feats_GT\n\n def get_style(self, img, mask):\n \"\"\"输入一张RGB图和对应的mask, 得到各个component 对应的style codes\n \n Args:\n img (Tensor): RGB图, each with shape [bs,3,1024,1024]\n mask (Tensor): RGB图对应的mask图, each with shape [bs,#seg_cls,1024,1024]\n \n Returns:\n structure_feats(Tensor): 图片的structure code, with shape [bs,512,32,32], 注意,这里其实是相对于StyleGAN第层输出的残差\n all_codes(Tensor): 各个component 对应的style codes, with shape [bs,#comp,18,512]。\n !!! 
注意,前7层的各个compnent其实没有意义,只是为了统一接口让shape保持一致,用的时候只用第1个即可 !!!\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n codes=[]\n bs, num_comp = codes_vector.size(0), codes_vector.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](codes_vector[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 11,512]\n\n # # 剩下的几层不用分component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](codes_vector.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # 为了保持接口统一,将后3层的 style code 也扩展出一个 #seg_cls 维度\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n style_codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n return structure_feats, style_codes\n\n def get_style_vectors(self, img, mask):\n \"\"\"输入一张RGB图和对应的mask, 得到各个component 对应的style vectors\n \n Args:\n img (Tensor): RGB图, each with shape [bs,3,1024,1024]\n mask (Tensor): RGB图对应的mask图, each with shape [bs,#seg_cls,1024,1024]\n \n Returns:\n style_vectors(Tensor): with shape [bs,#seg_cls,512]\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n style_vectors, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n style_vectors, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n \n return style_vectors, structure_feats\n \n def cal_style_codes(self,style_vectors):\n \"\"\"根据每个compnent的 style vector转到styleGAN的style code\"\"\"\n \n codes=[]\n bs, num_comp = style_vectors.size(0), style_vectors.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](style_vectors[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 11,512]\n\n # # 剩下的几层不用分component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](style_vectors.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # 为了保持接口统一,将后3层的 style code 也扩展出一个 #seg_cls 维度\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = 
self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n style_codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n return style_codes\n\n def gen_img(self, struc_codes, style_codes, mask, randomize_noise=True, noise=None, return_latents=False):\n \"\"\"输入一张mask 和 对应各components的style codes,以及这张图片的structure code, 生成一张图片\n \n Args:\n style_codes (Tensor): 各个component 对应的style codes, with shape [bs,#comp,18,512]\n struc_codes (Tensor)\n mask (Tensor): mask图, with shape [bs,#seg_cls,1024,1024]\n \n randomize_noise (bool, optional): 是否加入随机噪声. Defaults to True.\n return_latents (bool, optional): 是否返回style codes. Defaults to False.\n\n Returns:\n [type]: [description]\n \"\"\"\n \n images, result_latent, structure_feats = self.G([style_codes], struc_codes, mask, input_is_latent=True,\n randomize_noise=randomize_noise,noise=noise,return_latents=return_latents,\n use_structure_code=False)\n\n if return_latents:\n return images, result_latent, structure_feats\n else:\n return images,-1, structure_feats" }, { "identifier": "get_transforms", "path": "datasets/dataset.py", "snippet": "def get_transforms(normalize=True, toTensor=True):\n transform_list = []\n if toTensor:\n transform_list += [transforms.ToTensor()]\n\n if normalize:\n transform_list += [transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5))]\n return transforms.Compose(transform_list)" }, { "identifier": "TO_TENSOR", "path": "datasets/dataset.py", "snippet": "TO_TENSOR = transforms.ToTensor()" }, { "identifier": "NORMALIZE", "path": "datasets/dataset.py", "snippet": "NORMALIZE = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))" }, { "identifier": "read_video_as_frames", "path": "gradio_utils/face_swapping.py", "snippet": "def read_video_as_frames(data_path: str,\n out_frames_folder: str = None,\n frame_period: int = 1\n ) -> (List[np.ndarray], List[str]):\n if out_frames_folder is not None:\n os.makedirs(out_frames_folder, exist_ok=True)\n reader = cv2.VideoCapture(data_path)\n frame_num = 0\n ret_frames = []\n ret_paths = []\n while reader.isOpened():\n success, image = reader.read() # BGR\n if not success: # finished\n break\n if frame_num % frame_period != 0: # skip\n continue\n if out_frames_folder is not None:\n save_path = os.path.join(out_frames_folder, 'frame_%05d.png' % frame_num)\n cv2.imwrite(save_path, image)\n ret_paths.append(save_path)\n ret_frames.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) # RGB\n frame_num += 1\n reader.release()\n return ret_frames, ret_paths" }, { "identifier": "save_frames_as_video", "path": "gradio_utils/face_swapping.py", "snippet": "def save_frames_as_video(frames: Union[list, str],\n video_save_dir: str,\n video_save_fn: str = \"output.mp4\",\n frame_template: str = \"frame_%05d.png\",\n fps: int = 25,\n audio_from: str = None,\n delete_tmp_frames: bool = False,\n ):\n if isinstance(frames, str):\n frames_dir = frames\n elif isinstance(frames, list):\n frames_dir = os.path.join(video_save_dir, \"tmp_frames\")\n for idx, frame in enumerate(frames):\n frame.save(os.path.join(frames_dir, \"frame_%05d.png\" % idx))\n else:\n raise TypeError(\"Unsupported frames type.\")\n\n os.makedirs(video_save_dir, exist_ok=True)\n video_save_path = os.path.join(video_save_dir, video_save_fn)\n\n if audio_from is not None:\n print(\"use audio\")\n os.system(\n f\"ffmpeg -y -r {fps} -i {frames_dir}/{frame_template} -i {audio_from}\"\n f\" -map 0:v:0 -map 1:a:0? 
-c:a copy -c:v libx264 -r {fps} -crf 10 -pix_fmt yuv420p {video_save_path}\"\n )\n else:\n print(\"no audio\")\n os.system(\n f\"ffmpeg -y -r {fps} -i {frames_dir}/{frame_template} \"\n f\"-c:v libx264 -r {fps} -crf 10 -pix_fmt yuv420p {video_save_path}\"\n )\n\n if delete_tmp_frames:\n shutil.rmtree(frames_dir)\n for match in glob.glob(os.path.join(frames_dir, \"*.png\")):\n os.remove(match)\n\n print(f\"video saved to: {video_save_path}\")\n return video_save_path" }, { "identifier": "crop_and_align_face", "path": "gradio_utils/face_swapping.py", "snippet": "def crop_and_align_face(target_files, image_size=1024, scale=1.0, center_sigma=1.0, xy_sigma=3.0, use_fa=False):\n print('Aligning images')\n crops, orig_images, quads = crop_faces(image_size, target_files, scale, center_sigma=center_sigma,\n xy_sigma=xy_sigma, use_fa=use_fa)\n\n # crop 的逆变换,用于后期贴回到原始视频上去\n inv_transforms = [\n calc_alignment_coefficients(quad + 0.5, [[0, 0], [0, image_size], [image_size, image_size], [image_size, 0]])\n for quad in quads\n ]\n\n return crops, orig_images, quads, inv_transforms" }, { "identifier": "logical_or_reduce", "path": "gradio_utils/face_swapping.py", "snippet": "def logical_or_reduce(*tensors):\n return torch.stack(tensors, dim=0).any(dim=0)" }, { "identifier": "create_masks", "path": "gradio_utils/face_swapping.py", "snippet": "def create_masks(mask, operation='dilation', radius=0):\n temp = copy.deepcopy(mask)\n if operation == 'dilation':\n full_mask = dilation(temp, torch.ones(2 * radius + 1, 2 * radius + 1, device=mask.device), engine='convolution')\n border_mask = full_mask - temp\n elif operation == 'erosion':\n full_mask = erosion(temp, torch.ones(2 * radius + 1, 2 * radius + 1, device=mask.device), engine='convolution')\n border_mask = temp - full_mask\n # 'expansion' means to obtain a boundary that expands to both sides\n elif operation == 'expansion':\n full_mask = dilation(temp, torch.ones(2 * radius + 1, 2 * radius + 1, device=mask.device), engine='convolution')\n erosion_mask = erosion(temp, torch.ones(2 * radius + 1, 2 * radius + 1, device=mask.device),\n engine='convolution')\n border_mask = full_mask - erosion_mask\n\n border_mask = border_mask.clip(0, 1)\n content_mask = mask\n\n return content_mask, border_mask, full_mask" }, { "identifier": "get_facial_mask_from_seg19", "path": "gradio_utils/face_swapping.py", "snippet": "def get_facial_mask_from_seg19(seg_map_long: torch.LongTensor,\n target_size: tuple = None,\n edge_softer: SoftErosion = None,\n is_seg19: bool = False,\n ):\n \"\"\" segmentation format:\n 0 - background\n 1 - lip\n 2 - eyebrow\n 3 - eyes\n 4 - hair\n 5 - nose\n 6 - skin\n 7 - ear\n 8 - neck\n 9 - tooth\n 10 -\n 11 - earring\n \"\"\"\n if is_seg19:\n seg_map_long = torch.LongTensor(seg19_to_seg12(seg_map_long.cpu().numpy()))\n facial_indices = (1, 2, 3, 5, 6, 8, 9)\n mask = torch.zeros_like(seg_map_long, dtype=torch.long)\n for index in facial_indices:\n mask = mask + ((seg_map_long == index).long()) # either in {0,1}\n mask = mask.float()\n if target_size is not None:\n mask = F.interpolate(mask, size=target_size, mode=\"bilinear\", align_corners=True)\n if edge_softer is not None:\n mask, _ = edge_softer(mask)\n return mask.cpu().numpy()" }, { "identifier": "get_edge", "path": "gradio_utils/face_swapping.py", "snippet": "def get_edge(img: Image, threshold: int = 128) -> Image:\n img = np.array(img).astype(np.uint8)\n img_x = cv2.convertScaleAbs(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3))\n img_y = cv2.convertScaleAbs(cv2.Sobel(img, cv2.CV_64F, 0, 1, 
ksize=3))\n edge = cv2.addWeighted(img_x, 1, img_y, 1, 0)\n edge = cv2.cvtColor(edge, cv2.COLOR_RGB2GRAY)\n pos_big = np.where(edge >= threshold)\n pos_small = np.where(edge < threshold)\n # edge = cv2.GaussianBlur(edge, ksize=(3, 3), sigmaX=5)\n # edge[pos_big] = (edge[pos_big] * 1.05).clip(0, 255)\n # edge[pos_small] = (edge[pos_small] / 1.).clip(0, 255)\n # edge = cv2.GaussianBlur(edge, ksize=(5, 5), sigmaX=11)\n return Image.fromarray(edge.astype(np.uint8))" }, { "identifier": "blending_two_images_with_mask", "path": "gradio_utils/face_swapping.py", "snippet": "def blending_two_images_with_mask(bottom: Image, up: Image,\n up_ratio: float = 1.0,\n up_mask: np.ndarray = None\n ) -> Image:\n h, w = bottom.size\n if up_mask is None:\n up_mask = np.ones((h, w, 1), dtype=float)\n else:\n up_mask = up_mask.squeeze()[:, :, None]\n up_mask[np.isnan(up_mask)] = 0. # input may contain NAN\n assert 0.0 <= up_ratio <= 1.0, \"Blending Ratio should be in [0.0, 1.0]!\"\n up_mask *= up_ratio\n i_a = np.array(bottom)\n i_b = np.array(up)\n i_a = i_a * (1 - up_mask) + i_b * up_mask\n ret_image = Image.fromarray(i_a.astype(np.uint8).clip(0, 255))\n return ret_image" }, { "identifier": "SoftErosion", "path": "gradio_utils/face_swapping.py", "snippet": "class SoftErosion(torch.nn.Module):\n def __init__(self, kernel_size=15, threshold=0.6, iterations=1):\n super(SoftErosion, self).__init__()\n r = kernel_size // 2\n self.padding = r\n self.iterations = iterations\n self.threshold = threshold\n\n # Create kernel\n y_indices, x_indices = torch.meshgrid(torch.arange(0., kernel_size), torch.arange(0., kernel_size))\n dist = torch.sqrt((x_indices - r) ** 2 + (y_indices - r) ** 2)\n kernel = dist.max() - dist\n kernel /= kernel.sum()\n kernel = kernel.view(1, 1, *kernel.shape)\n self.register_buffer('weight', kernel)\n\n def forward(self, x):\n x = x.float()\n for i in range(self.iterations - 1):\n x = torch.min(x, F.conv2d(x, weight=self.weight, groups=x.shape[1], padding=self.padding))\n x = F.conv2d(x, weight=self.weight, groups=x.shape[1], padding=self.padding)\n\n mask = x >= self.threshold\n x[mask] = 1.0\n x[~mask] /= x[~mask].max()\n\n return x, mask" }, { "identifier": "init_facevid2vid_pretrained_model", "path": "swap_face_fine/face_vid2vid/drive_demo.py", "snippet": "def init_facevid2vid_pretrained_model(cfg_path, ckpt_path):\n \"\"\"\n 实例化 预训练的 face_vid2vid 模型\n \n \"\"\"\n generator, kp_detector, he_estimator = load_checkpoints(config_path=cfg_path, checkpoint_path=ckpt_path, gen=\"spade\", cpu=False)\n\n with open(cfg_path) as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n estimate_jacobian = config['model_params']['common_params']['estimate_jacobian']\n # print(f'estimate jacobian: {estimate_jacobian}')\n \n print(f'Load face_vid2vid pre-trained model success!')\n return generator, kp_detector, he_estimator, estimate_jacobian" }, { "identifier": "drive_source_demo", "path": "swap_face_fine/face_vid2vid/drive_demo.py", "snippet": "def drive_source_demo(\n source_im, target_ims, # 输入图片相关参数\n generator, kp_detector, he_estimator, estimate_jacobian # 模型相关参数\n ):\n \"\"\" \n 驱动 source image\n \n args:\n source_im (np.array): [H,W,3] 256*256 大小, [0,1]范围\n target_ims (List[np.array]): List 中每个图片的格式为[H,W,3] 256*256 大小, [0,1]范围\n return:\n \n predictions (List[np.array]): 驱动后的结果, List 中每个图片的格式为[H,W,3] 256*256 大小, [0,1]范围 \n \"\"\"\n \n predictions = make_animation(source_im, target_ims, generator, kp_detector, he_estimator, \n relative=True, adapt_movement_scale=True, 
estimate_jacobian=estimate_jacobian)\n \n return predictions" }, { "identifier": "init_faceParsing_pretrained_model", "path": "swap_face_fine/face_parsing/face_parsing_demo.py", "snippet": "def init_faceParsing_pretrained_model(ckpt_path):\n parser = FaceParser(seg_ckpt=ckpt_path)\n\n print(\"Load faceParsing pre-traiend model success!\")\n\n return parser" }, { "identifier": "faceParsing_demo", "path": "swap_face_fine/face_parsing/face_parsing_demo.py", "snippet": "def faceParsing_demo(model, img: Image, convert_to_seg12=True):\n \"\"\"\n 提取 img 的face segmentation map\n \n args:\n model (Object): 加载好的预训练模型\n img (PIL.Image): [0, 255]范围的 PIL.Image 格式图片\n \"\"\"\n with torch.no_grad():\n seg = model(img).cpu().numpy().astype(np.uint8)\n \n if convert_to_seg12:\n seg = __ffhq_masks_to_faceParser_mask_detailed(seg)\n return seg" }, { "identifier": "vis_parsing_maps", "path": "swap_face_fine/face_parsing/face_parsing_demo.py", "snippet": "def vis_parsing_maps(image, parsing_anno, stride=1):\n \"\"\" 将原图 和 seg map 放到一起可视化\n \n args:\n img (PIL.Image): [0, 255]范围的 PIL.Image 格式图片\n parsing_anno (np.array): parsing之后的seg map, size为 [512, 512]\n return:\n vis_im (np.array): 可视化图片, 用cv2保存\n \"\"\"\n # Colors for all 20 parts\n part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],\n [255, 0, 85], [255, 0, 170],\n [0, 255, 0], [85, 255, 0], [170, 255, 0],\n [0, 255, 85], [0, 255, 170],\n [0, 0, 255], [85, 0, 255], [170, 0, 255],\n [0, 85, 255], [0, 170, 255],\n [255, 255, 0], [255, 255, 85], [255, 255, 170],\n [255, 0, 255], [255, 85, 255], [255, 170, 255],\n [0, 255, 255], [85, 255, 255], [170, 255, 255]]\n\n im = image.resize((parsing_anno.shape[0], parsing_anno.shape[1]), Image.BILINEAR)\n im = np.array(im)\n vis_im = im.copy().astype(np.uint8)\n vis_parsing_anno = parsing_anno.copy().astype(np.uint8)\n vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)\n vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255\n\n num_of_class = np.max(vis_parsing_anno)\n\n for pi in range(1, num_of_class + 1):\n index = np.where(vis_parsing_anno == pi)\n vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]\n\n vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)\n # print(vis_parsing_anno_color.shape, vis_im.shape)\n vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)\n\n return vis_im" }, { "identifier": "GPENInfer", "path": "swap_face_fine/gpen/gpen_demo.py", "snippet": "class GPENInfer(object):\n def __init__(self):\n self.model = init_gpen_pretrained_model()\n\n def infer_image(self, img_lq: Image):\n img_lq = np.array(img_lq)\n img_hq = GPEN_demo(img_lq, self.model, aligned=False)\n img_hq = Image.fromarray(img_hq)\n return img_hq" }, { "identifier": "CodeFormerInfer", "path": "swap_face_fine/inference_codeformer.py", "snippet": "class CodeFormerInfer(object):\n def __init__(self):\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n bg_upsampler = None\n face_upsample = True\n # ------------------ set up background upsampler ------------------\n if bg_upsampler == 'realesrgan':\n bg_upsampler = set_realesrgan()\n else:\n bg_upsampler = None\n\n # ------------------ set up face upsampler ------------------\n if face_upsample:\n if bg_upsampler is not None:\n face_upsampler = bg_upsampler\n else:\n face_upsampler = set_realesrgan()\n else:\n face_upsampler = None\n\n self.face_upsampler = face_upsampler\n\n 
# ------------------ set up CodeFormer restorer -------------------\n # net = ARCH_REGISTRY.get('CodeFormer')(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, connect_list=['32', '64', '128', '256']).to(device)\n net = CodeFormer(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9,\n connect_list=['32', '64', '128', '256']).to(self.device)\n ckpt_path = './pretrained/codeformer/codeformer.pth'\n # ckpt_path = load_file_from_url(url=pretrain_model_url['restoration'], model_dir='weights/CodeFormer', progress=True, file_name=None)\n checkpoint = torch.load(ckpt_path)['params_ema']\n net.load_state_dict(checkpoint)\n net.eval()\n\n self.net = net\n\n # ------------------ set up FaceRestoreHelper -------------------\n if bg_upsampler is not None:\n print(f'Background upsampling: True, Face upsampling: {face_upsample}')\n else:\n print(f'Background upsampling: False, Face upsampling: {face_upsample}')\n\n @torch.no_grad()\n def infer_image(self, face_img, w=0.8, upscale=2):\n # prepare data\n face_img = face_img.resize((512, 512))\n cropped_face_t = img2tensor(np.array(face_img) / 255., bgr2rgb=False, float32=True)\n normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)\n cropped_face_t = cropped_face_t.unsqueeze(0).to(self.device)\n\n try:\n with torch.no_grad():\n output = self.net(cropped_face_t, w=w, adain=True)[0]\n restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))\n del output\n torch.cuda.empty_cache()\n except Exception as error:\n print(f'\\tFailed inference for CodeFormer: {error}')\n restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))\n\n restored_face = self.face_upsampler.enhance(restored_face, outscale=upscale)[0]\n restored_face = restored_face[:, :, ::-1].astype('uint8')\n\n return Image.fromarray(restored_face)" }, { "identifier": "RealESRBatchInfer", "path": "swap_face_fine/realesr/image_infer.py", "snippet": "class RealESRBatchInfer(object):\n def __init__(self):\n self.device = \"cuda:0\"\n self.args = EmptyArgs()\n self.args.model_name = \"RealESRGAN_x4plus\"\n self.args.model_path = make_abs_path(\"../../../ReliableSwap/pretrained/third_party/RealESRGAN/RealESRGAN_x4plus.pth\")\n self.args.denoise_strength = 0.5\n self.args.face_enhance = False\n self.args.tile = 0\n self.args.gpu_id = \"0\"\n\n self.model = RRDBNet(\n num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4\n ).to(self.device)\n\n # prefer to use params_ema\n loadnet = torch.load(self.args.model_path, map_location=torch.device('cpu'))\n if 'params_ema' in loadnet:\n keyname = 'params_ema'\n else:\n keyname = 'params'\n self.model.load_state_dict(loadnet[keyname], strict=True)\n\n # self.upsampler = RealESRGANer(\n # scale=4,\n # model_path=self.args.model_path,\n # dni_weight=None,\n # model=self.model,\n # gpu_id=self.args.gpu_id,\n # )\n\n print(f\"[RealESRBatchInfer] loaded from {self.args.model_path}.\")\n\n @torch.no_grad()\n def infer_batch(self, source_tensor: torch.Tensor, out_hw: tuple = None):\n if out_hw is None:\n out_hw = source_tensor.shape[2:]\n source_down = (source_tensor * 0.5 + 0.5).clamp(0, 1)\n source_down = F.interpolate(source_down, size=(256, 256), mode=\"bilinear\", align_corners=True)\n result = self.model(source_down) # (B,3,1024,1024)\n result = F.interpolate(result, size=out_hw, mode=\"bilinear\", align_corners=True)\n result = (result * 2. 
- 1.).clamp(-1, 1)\n return result\n\n def infer_image(self, img: Image):\n img = np.array(img)\n img = torch.from_numpy(img).float().cuda()\n img = (img / 127.5) - 1.\n img = img.unsqueeze(0)\n img = rearrange(img, \"n h w c -> n c h w\").contiguous()\n res = self.infer_batch(img, out_hw=(1024, 1024))\n res = rearrange(res, \"n c h w -> n h w c\").contiguous()\n res = (res[0] * 127.5 + 127.5).clamp(0, 255).cpu().numpy().astype(np.uint8)\n return Image.fromarray(res)" }, { "identifier": "VideoSwapPTICoach", "path": "training/video_swap_ft_coach.py", "snippet": "class VideoSwapPTICoach:\n def __init__(self, opts, e4s_net=None, num_targets=50, erode=False,\n ):\n self.opts = opts\n\n self.erode = erode\n self.device = torch.device(\"cuda\", 0)\n # self.opts.device = self.device\n \n # 定义数据集\n self.dataset = self.configure_dataset(num_targets)\n if num_targets == -1:\n num_targets = len(self.dataset)\n self.num_targets = num_targets\n \n # 定义 loss function\n self.mse_loss = nn.MSELoss().to(self.device).eval()\n if self.opts.lpips_lambda > 0:\n self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval()\n if self.opts.id_lambda > 0:\n self.id_loss = IDLoss(self.opts).to(self.device).eval()\n if self.opts.face_parsing_lambda > 0:\n self.face_parsing_loss = FaceParsingLoss(self.opts).to(self.device).eval()\n \n # 初始化网络\n if e4s_net is None:\n self.net = Net3(self.opts)\n # print(self.device)\n self.net = nn.SyncBatchNorm.convert_sync_batchnorm(self.net)\n self.net = self.net.to(self.device)\n else:\n self.net = e4s_net\n \n # 加载整个模型预训练好的参数,作为初始化\n assert self.opts.checkpoint_path is not None, \"必须提供预训练好的参数!\"\n ckpt_dict = torch.load(self.opts.checkpoint_path)\n self.net.latent_avg = ckpt_dict['latent_avg'].to(self.device)\n self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict[\"state_dict\"],prefix=\"module.\"))\n print(\"Load pre-trained model success!\") \n \n # 初始化优化器\n self.optimizer = self.configure_optimizer()\n\n # # 保存优化后模型的地址\n # self.checkpoint_dir = os.path.join(self.opts.exp_dir, 'checkpoints')\n # os.makedirs(self.checkpoint_dir, exist_ok=True)\n \n # Initialize tensorborad logger\n self.log_dir = os.path.join(self.opts.exp_dir, 'logs_lr%f_iters%d_erode%d_run2'%(self.opts.pti_learning_rate, self.opts.max_pti_steps, self.opts.erode_radius))\n os.makedirs(self.log_dir, exist_ok=True)\n self.logger = SummaryWriter(logdir = self.log_dir)\n \n def configure_dataset(self, num_targets: int = -1):\n save_dir = self.opts.exp_dir\n ds = VideoFaceSwappingDataset(\n driven = sorted(glob.glob(os.path.join(save_dir,\"imgs\", \"D_*.png\")))[:num_targets],\n driven_recolor = sorted(glob.glob(os.path.join(save_dir, \"imgs\", \"D_recolor_*.png\")))[:num_targets],\n driven_mask = sorted(glob.glob(os.path.join(save_dir,\"mask\",\"D_mask_*.png\")))[:num_targets],\n driven_style_vector = sorted(glob.glob(os.path.join(save_dir,\"styleVec\",\"D_style_vec_*.pt\")))[:num_targets],\n target = sorted(glob.glob(os.path.join(save_dir,\"imgs\", \"T_*.png\")))[:num_targets],\n target_mask = sorted(glob.glob(os.path.join(save_dir,\"mask\",\"T_mask_*.png\")))[:num_targets],\n target_style_vector = sorted(glob.glob(os.path.join(save_dir,\"styleVec\",\"T_style_vec_*.pt\")))[:num_targets],\n img_transform=transforms.Compose([TO_TENSOR, NORMALIZE]),\n label_transform=transforms.Compose([TO_TENSOR])\n ) \n \n return ds\n \n def configure_optimizer(self):\n self.params = list(filter(lambda p: p.requires_grad ,list(self.net.parameters())))\n if self.opts.optim_name == 'adam':\n optimizer = 
torch.optim.Adam(self.params, lr=self.opts.pti_learning_rate)\n else:\n optimizer = Ranger(self.params, lr=self.opts.pti_learning_rate)\n return optimizer\n \n def calc_loss(self, img, img_recon, foreground_mask=None):\n \"\"\"\n img: target 图片\n img_recon: 当前得到的结果 \n \"\"\"\n loss_dict = {}\n loss = 0.0\n id_logs = None\n \n if foreground_mask is not None:\n img_recon = img_recon * foreground_mask\n img = img * foreground_mask \n \n if self.opts.id_lambda > 0:\n loss_id, sim_improvement, id_logs = self.id_loss(img_recon, img)\n loss_dict['loss_id'] = float(loss_id)\n loss_dict['id_improve'] = float(sim_improvement)\n loss += loss_id * self.opts.id_lambda\n if self.opts.l2_lambda > 0:\n loss_l2 = F.mse_loss(img_recon, img)\n loss_dict['loss_l2'] = float(loss_l2)\n loss += loss_l2 * self.opts.l2_lambda\n if self.opts.lpips_lambda > 0:\n loss_lpips = 0\n for i in range(3):\n loss_lpips_1 = self.lpips_loss(\n F.adaptive_avg_pool2d(img_recon,(1024//2**i,1024//2**i)), \n F.adaptive_avg_pool2d(img,(1024//2**i,1024//2**i))\n )\n loss_lpips += loss_lpips_1\n \n loss_dict['loss_lpips'] = float(loss_lpips)\n loss += loss_lpips * self.opts.lpips_lambda\n if self.opts.face_parsing_lambda > 0:\n loss_face_parsing, face_parsing_sim_improvement = self.face_parsing_loss(img_recon, img)\n loss_dict['loss_face_parsing'] = float(loss_face_parsing)\n loss_dict['face_parsing_improve'] = float(face_parsing_sim_improvement)\n loss += loss_face_parsing * self.opts.face_parsing_lambda\n \n loss_dict['loss'] = float(loss)\n return loss, loss_dict, id_logs\n \n @torch.no_grad()\n def recon_driven(self):\n self.net.eval()\n\n print('Reconstrcution driven videos...')\n for idx, (driven_image, driven_m, driven_s_v,\n target_image, target_m, target_s_v,\n driven_recolor_pil, driven_pil, target_pil) in tqdm(enumerate(self.dataset)): # 从idx = 0 开始\n \n driven_m = (driven_m*255).long().to(self.opts.device).unsqueeze(0)\n driven_onehot = torch_utils.labelMap2OneHot(driven_m, num_cls=self.opts.num_seg_cls)\n driven_style_vector = driven_s_v.to(self.opts.device).float()\n driven_style_code = self.net.cal_style_codes(driven_style_vector)\n \n recon_i, _, structure_feats_i = self.net.gen_img(torch.zeros(1,512,32,32).to(self.opts.device), driven_style_code, driven_onehot)\n # randomize_noise=False,noise=noise)\n torch_utils.tensor2im(recon_i[0]).save(os.path.join(self.opts.exp_dir, \"imgs\", \"D_finetuned_recon_%04d.png\"%idx))\n\n def train(self):\n self.train_e4s()\n\n def train_e4s(self):\n self.net.train()\n \n print('Fine tuning the network...')\n for step in trange(self.opts.max_pti_steps):\n step_loss_dict = defaultdict(list)\n t = (step + 1) / self.opts.max_pti_steps\n\n verbose_recon = None\n for idx, (driven_image, driven_m, driven_s_v,\n target_image, target_m, target_s_v,\n driven_recolor_pil, driven_pil, target_pil) in enumerate(tqdm(self.dataset,\n desc=f\"tuning e4s_g {step}/{self.opts.max_pti_steps}\",\n position=0,\n )): # 从idx = 0 开始\n driven_m = (driven_m*255).long().to(self.opts.device).unsqueeze(0)\n\n if self.erode:\n driven_pil = Image.fromarray(np.transpose((255*(driven_image.numpy()+1)/2).astype(np.uint8), (1,2,0)))\n driven_m_np = driven_m[0,0,:,:].cpu().numpy().astype(np.uint8)\n driven_m_eroded, erode_verbose = erode_mask(driven_m_np, driven_pil , radius=self.opts.erode_radius, verbose=True)\n driven_m = torch.from_numpy(driven_m_eroded).long().to(self.opts.device).unsqueeze(0).unsqueeze(0)\n\n driven_image = driven_image.to(self.opts.device).float().unsqueeze(0)\n driven_onehot = 
torch_utils.labelMap2OneHot(driven_m, num_cls=self.opts.num_seg_cls)\n driven_style_vector = driven_s_v.to(self.opts.device).float()\n driven_style_code = self.net.cal_style_codes(driven_style_vector)\n\n zero_latent = torch.zeros((1,512,32,32), requires_grad=False).to(self.opts.device)\n recon_i, _, structure_feats_i = self.net.gen_img(zero_latent, driven_style_code, driven_onehot)\n # in [-1,1]\n\n ''' also guided by recolor net '''\n recolor_i = torch_utils.im2tensor(driven_recolor_pil, std=False)\n\n mask_bg_and_hair = logical_or_reduce(*[driven_m == clz for clz in [0, 4, 11]])\n is_foreground = torch.logical_not(mask_bg_and_hair)\n foreground_mask = is_foreground.float()\n foreground_mask = F.interpolate(foreground_mask, (1024, 1024), mode='bilinear', align_corners=False)\n if self.erode:\n loss, loss_dict, id_logs = self.calc_loss(driven_image, recon_i, foreground_mask=foreground_mask)\n else:\n loss, loss_dict, id_logs = self.calc_loss(driven_image, recon_i)\n\n loss_recolor, _, _ = self.calc_loss(recolor_i, recon_i, foreground_mask=foreground_mask)\n loss += loss_recolor * self.opts.recolor_lambda # default: 0.5?\n \n if idx == 0:\n verbose_recon = np.array(torch_utils.tensor2im(recon_i[0]))\n\n step_loss_dict['loss'].append(loss.item())\n for k,v in loss_dict.items():\n if \"loss_\" in k:\n step_loss_dict[k].append(v)\n \n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n \n # 记录视频序列中第一张图片在每个step后的结果\n self.logger.add_image(\"image_recon\", verbose_recon, step, dataformats='HWC')\n\n # 记录 每个step 视频中所有帧的 平均loss\n log_dict = {}\n for key, losses in step_loss_dict.items():\n loss_mean = sum(losses) / len(losses)\n loss_max = max(losses)\n \n self.logger.add_scalar(f'loss_mean/{key}', loss_mean, step)\n self.logger.add_scalar(f'loss_max/{key}', loss_max, step)\n \n if step+1 == 100:\n save_dict = self.get_save_dict()\n torch.save(save_dict, os.path.join(self.opts.exp_dir, \"finetuned_G_lr%f_iters%d.pth\"%(self.opts.pti_learning_rate, step+1)))\n \n print('Finished fine-tuning e4s generator!')\n\n def checkpoint_me(self):\n save_name = 'finetuned_model_%d.pt'%self.opts.max_pti_steps\n save_dict = self.get_save_dict()\n checkpoint_path = os.path.join(self.checkpoint_dir, save_name)\n torch.save(save_dict, checkpoint_path)\n \n def get_save_dict(self):\n save_dict = {\n 'state_dict': self.net.state_dict(),\n 'opts': vars(self.opts),\n }\n # save the latent avg in state_dict for inference if truncation of w was used during training\n if self.opts.start_from_latent_avg:\n save_dict['latent_avg'] = self.net.latent_avg\n \n return save_dict\n\n def freeze_e4s_g(self):\n self.net.requires_grad_(False)" }, { "identifier": "swap_head_mask_hole_first", "path": "swap_face_fine/swap_face_mask.py", "snippet": "def swap_head_mask_hole_first(source, target):\n \"\"\" segmentation format:\n 0 - background\n 1 - lip\n 2 - eyebrow\n 3 - eyes\n 4 - hair\n 5 - nose\n 6 - skin\n 7 - ear\n 8 - neck\n 9 - tooth\n 10 - eyeglass\n 11 - earring\n \"\"\"\n # calculate the hole map fist\n source_bg_mask = np.logical_or(source == 4, source == 0) # hair, bg\n source_bg_mask = np.logical_or(source_bg_mask, source == 8) # neck\n source_bg_mask = np.logical_or(source_bg_mask, source == 7) # ear\n source_bg_mask = np.logical_or(source_bg_mask, source == 11) # earring\n source_face_mask = np.logical_not(source_bg_mask)\n\n target_bg_mask = np.logical_or(target == 4, target == 0) # hair, bg\n target_bg_mask = np.logical_or(target_bg_mask, target == 8) # neck\n target_bg_mask = 
np.logical_or(target_bg_mask, target == 7) # ear\n target_bg_mask = np.logical_or(target_bg_mask, target == 11) # earring\n target_face_mask = np.logical_not(target_bg_mask)\n\n face_overlap_mask = np.logical_and(source_face_mask, target_face_mask)\n hole_mask = np.logical_xor(face_overlap_mask, target_face_mask)\n\n # swap mask\n res = np.zeros_like(target)\n \n target_regions = [np.equal(target, i) for i in range(12)]\n source_regions = [np.equal(source, i) for i in range(12)]\n\n # adjust or finetune the hole mask\n eye_line = int(2 / 5 * target.shape[0])\n nose_line = int(3 / 5 * target.shape[0])\n if np.any(source == 3):\n eye_line = np.where(source == 3)[0].max() # eye lowest\n elif np.any(source == 2):\n eye_line = np.where(source == 2)[0].max() # eye_brow lowest\n if np.any(source == 5):\n nose_line = np.where(source == 5)[0].max() # nose lowest\n # hole_mask[np.logical_and(source_regions[4], target_regions[6])] = False # source hair & target skin, not\n # hole_mask[np.logical_and(source_regions[4], target_regions[2])] = False # source hair & target eyebrow, not\n # hole_mask[np.logical_and(source_regions[4], target_regions[3])] = False # source hair & target eye, not\n if len(hole_mask) >= eye_line:\n hole_mask[:eye_line, :] = False # higher than eyes set as False\n\n \"\"\" The background, neck, ear and earrings regions of target \"\"\"\n res[target_regions[0]] = 99 # a place-holder magic number for bg (target-bg)\n res[target_regions[8]] = 8 # neck (target-bg)\n # res[target_regions[4]] = 4 # hair, hair first as background\n res[target_regions[7]] = 7 # ear (target-bg)\n res[target_regions[11]] = 11 # earring (target-bg)\n\n # fill in the hole\n\n # res = fill_hole(res, hole_mask, radius=5, eye_line=eye_line, nose_line=nose_line)\n # res[hole_mask] = 4\n # hole_mask[:eye_line, :] = False # higher than eyes set as False\n\n \"\"\" The inner-face of source \"\"\"\n ''' op1. cairong version '''\n # res[source_regions[7]] = 7\n # res[source_regions[11]] = 11\n res[source_regions[1]] = 1 # lip\n res[source_regions[2]] = 2 # eyebrows\n res[np.logical_and(source_regions[4], target_regions[2])] = 2 # source hair & target eyebrows\n res[source_regions[3]] = 3 # eyes\n res[source_regions[5]] = 5 # nose\n res[source_regions[6]] = 6 # skin\n res[source_regions[9]] = 9 # mouth\n ''' op2. 
zhian version '''\n # res[np.logical_and(source_regions[1], np.not_equal(res, 99))] = 1 # lip\n # res[np.logical_and(source_regions[2], np.not_equal(res, 99))] = 2 # eyebrows\n # res[np.logical_and(source_regions[3], np.not_equal(res, 99))] = 3 # eyes\n # res[np.logical_and(source_regions[5], np.not_equal(res, 99))] = 5 # nose\n # res[np.logical_and(source_regions[6], np.not_equal(res, 99))] = 6 # skin\n # res[np.logical_and(source_regions[9], np.not_equal(res, 99))] = 9 # mouth\n\n \"\"\" Fix target foreground like hat occlusions \"\"\"\n # Additional foreground = (target_bg) && (source_skin higher than target_skin)\n H, W = target.shape\n target_skin_highest_by_width = np.ones(W, dtype=np.long) * H\n target_skin = np.zeros_like(target, dtype=target.dtype)\n target_skin[target_regions[6]] = 1\n target_skin = target_skin * (np.arange(H)[:, None])\n target_skin[target_skin == 0] = H\n target_skin_highest_by_width = target_skin.min(axis=0) # (W,)\n target_bg_region = np.where(target == 0)\n target_bg_positions_h = target_bg_region[0]\n target_bg_positions_w = target_bg_region[1]\n target_foreground_h_positions = []\n target_foreground_w_positions = []\n for i in range(len(target_bg_positions_h)):\n h = target_bg_positions_h[i]\n w = target_bg_positions_w[i]\n if h <= target_skin_highest_by_width[w] != H:\n target_foreground_h_positions.append(h)\n target_foreground_w_positions.append(w)\n target_foreground_region = (np.array(target_foreground_h_positions),\n np.array(target_foreground_w_positions))\n if len(target_foreground_h_positions) > 0:\n res[target_foreground_region] = 98 # additional foreground (target-foreground)\n\n # res[np.logical_and(source_regions[6], np.not_equal(res, 99))] = 6 # skin\n res[target_regions[4]] = 4 # not hair first (target-foreground), hair as foreground\n res[target_regions[10]] = 10 # eye_glass (target-foreground)\n # res[target_regions[7]] = 7 # removed, ear is background (target-background)\n\n \"\"\" The missing pixels, fill in skin temporarily \"\"\"\n ''' op1. cairong version '''\n res[res == 0] = 6 # fill hole with skin\n res[res == 99] = 0\n res[res == 98] = 0\n hole_map = res.copy()\n hole_map[hole_mask] = 17 # see: torch_utils.get_colors\n ''' op2. 
zhian version '''\n # if np.sum(res == 0) != 0:\n # hole_mask = 1 * (res == 0)\n # res[res == 0] = 6 # skin\n # else:\n # hole_mask = np.zeros_like(res)\n # hole_mask = hole_mask.astype(np.bool)\n # # hole_mask[0:eye_line] = False # set parts higher than eyes to zero(False)\n # hole_mask[source_regions[4]] = False # set source hair parts to zero(False)\n # res[res == 99] = 0 # restore the background\n # hole_map = res.copy()\n # hole_map[hole_mask] = 1\n\n \"\"\"\n res: 0-bg, 1-lip, 2-eyebrow, 3-eye, 4-hair, 5-nose, 6-skin, 7-ear, 8-neck\n hole_mask: in {True,False}\n hole_map: in {0,...,11}\n \"\"\"\n return res, hole_mask, hole_map, nose_line" }, { "identifier": "swap_comp_style_vector", "path": "swap_face_fine/swap_face_mask.py", "snippet": "def swap_comp_style_vector(style_vectors1, style_vectors2, comp_indices=[], belowFace_interpolation=False):\n \"\"\"替换 style_vectors1 中某个component的 style vectors\n\n Args:\n style_vectors1 (Tensor): with shape [1,#comp,512], target image 的 style vectors\n style_vectors2 (Tensor): with shape [1,#comp,512], source image 的 style vectors\n \"\"\"\n assert comp_indices is not None\n\n style_vectors = copy.deepcopy(style_vectors1)\n\n for comp_idx in comp_indices:\n style_vectors[:, comp_idx, :] = style_vectors2[:, comp_idx, :]\n\n # 额外处理一下耳朵 和 耳环部分\n\n # 如果 source 没有耳朵,那么就用 source 和target 耳朵style vectors的平均值 (为了和皮肤贴合)\n # if torch.sum(style_vectors2[:,7,:]) == 0:\n style_vectors[:, 7, :] = (style_vectors1[:, 7, :] + style_vectors2[:, 7, :]) / 2\n\n # 直接用 target 耳环的style vector\n style_vectors[:, 11, :] = style_vectors1[:, 11, :]\n\n # 脖子用二者的插值\n if belowFace_interpolation:\n style_vectors[:, 8, :] = (style_vectors1[:, 8, :] + style_vectors2[:, 8, :]) / 2\n\n # 如果source 没有牙齿,那么用 target 牙齿的 style vector\n if torch.sum(style_vectors2[:, 9, :]) == 0:\n style_vectors[:, 9, :] = style_vectors1[:, 9, :]\n\n return style_vectors" }, { "identifier": "blending", "path": "swap_face_fine/multi_band_blending.py", "snippet": "def blending(full_img, ori_img, mask):\n height, width = ori_img.shape[:2]\n\n mask_sharp = 1\n \n \"\"\"\n try:\n new_h = 2 ** (int(np.log2(height)) + 1)\n new_w = 2 ** (int(np.log2(width)) + 1)\n full_img, ori_img, full_mask = [cv2.resize(x, (new_h, new_w)) for x in (full_img, ori_img, np.float32(mask_sharp * mask))]\n # full_img = cv2.convertScaleAbs(ori_img*(1-full_mask) + full_img*full_mask)\n img = Laplacian_Pyramid_Blending_with_mask(full_img, ori_img, full_mask, 10)\n except:\n \"\"\"\n new_h = 1024\n new_w = 1024\n full_img, ori_img, full_mask = [cv2.resize(x, (new_h, new_w)) for x in (full_img, ori_img, np.float32(mask_sharp * mask))]\n # full_img = cv2.convertScaleAbs(ori_img*(1-full_mask) + full_img*full_mask)\n img = Laplacian_Pyramid_Blending_with_mask(full_img, ori_img, full_mask, 10)\n\n ### img in [0, 255]\n img = np.clip(img, 0 ,255)\n img = np.uint8(cv2.resize(img, (width, height)))\n return img" }, { "identifier": "BlenderInfer", "path": "swap_face_fine/Blender/inference.py", "snippet": "class BlenderInfer(object):\n def __init__(self):\n parser = get_base_parser()\n parser = add_hyper(parser)\n args = parser.parse_args()\n args.eval_only = True\n # args.small_FPN = True\n # print('[DEBUG inference] args:', args)\n\n netG = Blender(args).cuda()\n\n load_path = './pretrained/face_blender/latest_netG.pth'\n\n netG_params = torch.load(load_path)\n netG.load_state_dict(netG_params)\n netG.requires_grad_(False)\n netG.eval()\n\n self.netG = netG\n\n @torch.no_grad()\n def infer_image(self, img_a, img_t, mask_a, mask_t):\n \"\"\"\n 
Transfer the color of img_t to img_a.\n \"\"\"\n img_a = img_a.resize((256, 256)).convert('RGB')\n img_t = img_t.resize((256, 256)).convert('RGB')\n\n mask_a = mask_a.resize((256, 256)).convert('L')\n mask_t = mask_t.resize((256, 256)).convert('L')\n\n to_tensor = transforms.ToTensor()\n to_norm = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n\n mask_a = torch.tensor(np.array(mask_a)).unsqueeze(0).cuda()\n mask_t = torch.tensor(np.array(mask_t)).unsqueeze(0).cuda()\n\n img_a = to_norm(to_tensor(img_a)).unsqueeze(0).cuda()\n img_t = to_norm(to_tensor(img_t)).unsqueeze(0).cuda()\n\n with torch.no_grad():\n img_pred, _, _, _ = self.netG(img_a, img_t, mask_a, mask_t)\n\n img_pred = img_pred[0].permute(1, 2, 0).cpu().data.numpy()\n\n return Image.fromarray(np.uint8(img_pred * 255))" } ]
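The heart of the identity transfer documented in the context list above is `swap_comp_style_vector`: it copies selected component rows of the target's style tensor from the source, then patches the ear, earring, and tooth entries so the blend stays consistent. Below is a condensed sketch of that behaviour, assuming style tensors shaped [1, #components, 512] and the 12-class component indices used throughout (7 = ear, 9 = tooth, 11 = earring); it mirrors the documented snippet rather than replacing it.

import copy
import torch

def swap_components(target_vectors: torch.Tensor,
                    source_vectors: torch.Tensor,
                    comp_indices) -> torch.Tensor:
    # copy the requested component rows from the source into the target
    out = copy.deepcopy(target_vectors)
    for idx in comp_indices:
        out[:, idx, :] = source_vectors[:, idx, :]
    # average the ear vectors so the swapped skin tone stays plausible,
    # keep the target's earrings, and fall back to target teeth if the
    # source has none (all mirroring swap_comp_style_vector above)
    out[:, 7, :] = (target_vectors[:, 7, :] + source_vectors[:, 7, :]) / 2
    out[:, 11, :] = target_vectors[:, 11, :]
    if torch.sum(source_vectors[:, 9, :]) == 0:
        out[:, 9, :] = target_vectors[:, 9, :]
    return out

A typical inner-face swap in this convention would pass comp_indices = [1, 2, 3, 5, 6, 9] (lip, eyebrows, eyes, nose, skin, mouth); the exact index list the pipeline uses is configured upstream and is not shown in this excerpt.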
import argparse
import copy
import os
import datetime
import glob

import cv2
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from tqdm import tqdm
from PIL import Image
from skimage.transform import resize
from torch.nn import functional as F

from options.our_swap_face_pipeline_options import OurSwapFacePipelineOptions
from utils import torch_utils
from models.networks import Net3
from datasets.dataset import get_transforms, TO_TENSOR, NORMALIZE
from gradio_utils.face_swapping import (
    read_video_as_frames,
    save_frames_as_video,
    crop_and_align_face,
    logical_or_reduce,
    create_masks,
    get_facial_mask_from_seg19,
    get_edge,
    blending_two_images_with_mask,
    SoftErosion,
)
from swap_face_fine.face_vid2vid.drive_demo import init_facevid2vid_pretrained_model, drive_source_demo
from swap_face_fine.face_parsing.face_parsing_demo import (
    init_faceParsing_pretrained_model,
    faceParsing_demo,
    vis_parsing_maps
)
from swap_face_fine.gpen.gpen_demo import GPENInfer
from swap_face_fine.inference_codeformer import CodeFormerInfer
from swap_face_fine.realesr.image_infer import RealESRBatchInfer
from training.video_swap_ft_coach import VideoSwapPTICoach
from swap_face_fine.swap_face_mask import swap_head_mask_hole_first, swap_comp_style_vector
from swap_face_fine.multi_band_blending import blending
from swap_face_fine.Blender.inference import BlenderInfer
21055
            swapped_style_vectors = swap_comp_style_vector(T_style_vector, D_style_vector, list(comp_indices),
                                                           belowFace_interpolation=False)
            with torch.no_grad():
                swapped_style_codes = net.cal_style_codes(swapped_style_vectors)
                swapped_face, _, structure_feats = net.gen_img(torch.zeros(1, 512, 32, 32).to(opts.device),
                                                               swapped_style_codes, swappped_one_hot)  # in [-1,1]

            ''' save images '''
            swapped_face_image = torch_utils.tensor2im(swapped_face)
            swapped_face_image = swapped_face_image.resize((512, 512)).resize((1024, 1024))
            swapped_m = transforms.Compose([TO_TENSOR])(swapped_msk)
            swapped_m = (swapped_m * 255).long().to(opts.device).unsqueeze(0)
            swapped_face_image.save(os.path.join(swap_save_dir, "pti_gen_%04d.png" % i))
            swaps_face.append(swapped_face_image)

            outer_dilation = 5  # this value can be tuned
            # For video face swapping, treat the hair as part of the background as well:
            # 11 earrings, 4 hair, 8 neck, 7 ear
            mask_bg = logical_or_reduce(*[swapped_m == clz for clz in [0, 11, 7, 4, 8]])  # 4,8,7
            is_foreground = torch.logical_not(mask_bg)
            hole_index = hole_mask[None][None]
            is_foreground[hole_index[None]] = True
            foreground_mask = is_foreground.float()

            # foreground_mask = dilation(foreground_mask, torch.ones(2 * outer_dilation + 1, 2 * outer_dilation + 1, device=foreground_mask.device), engine='convolution')
            content_mask, border_mask, full_mask = create_masks(foreground_mask, operation='expansion', radius=5)

            # paste back
            content_mask = F.interpolate(content_mask, (1024, 1024), mode='bilinear', align_corners=False)
            content_mask = content_mask[0, 0, :, :, None].cpu().numpy()
            border_mask = F.interpolate(border_mask, (1024, 1024), mode='bilinear', align_corners=False)
            border_mask = border_mask[0, 0, :, :, None].cpu().numpy()
            border_mask = np.repeat(border_mask, 3, axis=-1)

            swapped_and_pasted = swapped_face_image * content_mask + T * (1 - content_mask)
            swapped_and_pasted = Image.fromarray(blending(np.array(T), swapped_and_pasted, mask=border_mask))
            pasted_image = swapped_and_pasted

            if targets_inv_trans is None:  # op1. directly paste
                pasted_image.save(os.path.join(swap_save_dir, "swap_face_%04d.png" % i))
            else:  # op2. paste back
                swapped_and_pasted = swapped_and_pasted.convert('RGBA')
                pasted_image = targets_ori[i].convert('RGBA')
                swapped_and_pasted.putalpha(255)
                projected = swapped_and_pasted.transform(targets_ori[i].size, Image.PERSPECTIVE,
                                                         targets_inv_trans[i], Image.BILINEAR)
                pasted_image.alpha_composite(projected)
                pasted_image.save(os.path.join(swap_save_dir, "swap_face_%04d.png" % i))

        return {
            "swaps_face": swaps_face,
            "swaps_mask": swaps_mask,
        }

    def _prepare_outputs(self, result_video_fn: str, target_video_path: str):
        out_dir = self.out_dir
        swap_save_dir = os.path.join(self.out_dir, "swapped")
        save_frames_as_video(
            frames=swap_save_dir,
            video_save_dir=out_dir,
            video_save_fn=result_video_fn,
            frame_template="swap_face_%04d.png",
            audio_from=target_video_path,
            delete_tmp_frames=False,
        )
        return

    def _load_face_reenact_model(self):
        if len(self.face_reenact_model.items()) > 0:
            return self.face_reenact_model
        face_vid2vid_cfg = "./pretrained/faceVid2Vid/vox-256.yaml"
        face_vid2vid_ckpt = "./pretrained/faceVid2Vid/00000189-checkpoint.pth.tar"
        generator, kp_detector, he_estimator, estimate_jacobian = init_facevid2vid_pretrained_model(
            face_vid2vid_cfg, face_vid2vid_ckpt
        )
        self.face_reenact_model["generator"] = generator
        self.face_reenact_model["kp_detector"] = kp_detector
        self.face_reenact_model["he_estimator"] = he_estimator
        self.face_reenact_model["estimate_jacobian"] = estimate_jacobian
        print("[FaceSwapVideoPipeline] Face reenactment model loaded.")
        return self.face_reenact_model

    def _free_face_reenact_model(self):
        keys = self.face_reenact_model.keys()
        for k in tuple(keys):
            del self.face_reenact_model[k]
        print("[FaceSwapVideoPipeline] Face reenactment model free memory.")
        self.face_reenact_model = {}

    def _load_face_parsing_model(self):
        if len(self.face_parsing_model.items()) > 0:
            return self.face_parsing_model
        face_parsing_ckpt = "./pretrained/faceseg/79999_iter.pth"
        self.face_parsing_model["model"] = init_faceParsing_pretrained_model(face_parsing_ckpt)
        print("[FaceSwapVideoPipeline] Face parsing model loaded.")
        return self.face_parsing_model

    def _load_face_enhance_model(self, name: str = "gpen"):
        if self.face_enhance_model.get(name) is not None:
            return self.face_enhance_model
        if name == "gpen":
            self.face_enhance_model["gpen"] = GPENInfer()
        elif name == "codeformer":
            self.face_enhance_model["codeformer"] = CodeFormerInfer()
        elif name == "realesr":
            self.face_enhance_model["realesr"] = RealESRBatchInfer()
        else:
            raise KeyError(f"Not supported face enhancement model: {name}")
        print(f"[FaceSwapVideoPipeline] Face enhancing model loaded: {name}")
        return self.face_enhance_model

    def _load_e4s_model(self):
        if not self.e4s_model is None:
            return self.e4s_model
        opts = self.e4s_opt
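The `op2` branch above is the step that moves the 1024x1024 crop back into the original frame: the crop is made fully opaque, warped with the inverse perspective coefficients computed during alignment, and alpha-composited over the untouched frame. A minimal stand-alone sketch of just that step, assuming `inv_coeffs` is one entry of the `inv_transforms` list returned by `crop_and_align_face`:

from PIL import Image

def paste_back(swapped_crop: Image.Image, original_frame: Image.Image, inv_coeffs) -> Image.Image:
    # make the crop fully opaque so only the warped region overwrites the frame
    crop = swapped_crop.convert("RGBA")
    crop.putalpha(255)
    canvas = original_frame.convert("RGBA")
    # warp the crop into the original frame's coordinate system; pixels outside
    # the warped quad come out transparent and leave the frame untouched
    projected = crop.transform(original_frame.size, Image.PERSPECTIVE, inv_coeffs, Image.BILINEAR)
    canvas.alpha_composite(projected)
    return canvas.convert("RGB")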
class FaceSwapVideoPipeline(object): def __init__(self, e4s_opt: argparse.Namespace, use_time_subfolder: bool = True, ): self.exp_root = e4s_opt.exp_dir self.out_dir = e4s_opt.exp_dir self.pti_save_fn = None self.use_time_subfolder = use_time_subfolder self.e4s_opt = e4s_opt self.e4s_model = None self.device = e4s_opt.device # models are lazy loaded self.face_reenact_model = {} self.face_parsing_model = {} self.face_enhance_model = {} self.face_recolor_model = {} self.mask_softer_model = {} self.num_seg_cls = 12 # fixed def forward(self, target_video_path: str, source_image_path: str, result_video_fn: str = "output.mp4", use_crop: bool = True, target_frames_cnt: int = -1, use_pti: bool = True, pti_resume_weight_path: str = "./video_outputs/finetuned_G_lr0.001000_iters80.pth", ): """ @param target_video_path: @param source_image_path: @param result_video_fn: @param use_crop: @param target_frames_cnt: @param use_pti: @param pti_resume_weight_path: if opt.max_pti_steps == 0, the pipeline will use this pre-trained weight file """ # 0. update time, used as output directory self._update_out_dir() # 1. prepare input target and source target_paths, source_paths = self._prepare_inputs( target_video_path, source_image_path, target_frames_cnt=target_frames_cnt, ) target_frames_cnt = len(target_paths) # 2. crop and align crop_results = self._process_crop_align( target_paths, source_paths, use_crop=use_crop, ) T = crop_results["targets_crop"] S = crop_results["source_crop"] T_ori = crop_results["targets_ori"] T_inv_trans = crop_results["targets_inv_trans"] # 3. face reenactment drivens, drivens_recolor = self._process_face_reenact( T, S, use_recolor=True ) # 4. face enhancement # drivens = self._process_face_enhance( # drivens, model_name="codeformer", # ) # if drivens_recolor[0] is not None: # drivens_recolor = self._process_face_enhance( # drivens_recolor, model_name="codeformer", save_prefix="D_recolor_" # ) # 5. face parsing parsing_results = self._process_face_parsing( T, S, drivens ) T_mask = parsing_results["targets_mask"] S_mask = parsing_results["source_mask"] D_mask = parsing_results["drivens_mask"] # 6. extract initial style vectors self._process_extract_init_style_vectors( drivens, T, drivens_mask=D_mask, targets_mask=T_mask ) # 7. PTI tuning if use_pti: self._process_pti_tuning( pti_resume_weight_path, target_frames_cnt=target_frames_cnt, ) # 8. face swapping swap_results = self._process_face_swapping( target_frames_cnt, T_inv_trans, T_ori, ) swaps_face = swap_results["swaps_face"] # each is: PIL.Image swaps_mask = swap_results["swaps_mask"] # each is: np.ndarray(512,512), in {0,...,9} # 9. 
prepare outputs self._prepare_outputs( result_video_fn, target_video_path ) def _update_out_dir(self): if not self.use_time_subfolder: return now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") out_dir = os.path.join(self.exp_root, now) os.makedirs(out_dir, exist_ok=True) self.out_dir = out_dir self.e4s_opt.exp_dir = out_dir print(f"[FaceSwapVideoPipeline] out directory changed to: {self.out_dir}") return def _prepare_inputs(self, target_video_path: str, source_image_path: str, target_frames_cnt: int = 120, ): in_target_frames_folder = os.path.join(self.out_dir, "in_frames/") t_frames, t_paths = read_video_as_frames(target_video_path, in_target_frames_folder) t_frames = t_frames[:target_frames_cnt] t_paths = t_paths[:target_frames_cnt] # many targets s_paths = [source_image_path] # only 1 source # save inputs target_save_path = os.path.join(self.out_dir, "target.mp4") source_save_path = os.path.join(self.out_dir, "source.png") os.system(f"cp {target_video_path} {target_save_path}") os.system(f"cp {source_image_path} {source_save_path}") return t_paths, s_paths def _process_crop_align(self, t_paths: list, s_paths: list, use_crop: bool): if use_crop: target_files = [(os.path.basename(f).split('.')[0], f) for f in t_paths] source_files = [(os.path.basename(f).split('.')[0], f) for f in s_paths] target_crops, target_orig_images, target_quads, target_inv_transforms = crop_and_align_face( target_files, image_size=1024, scale=1.0, center_sigma=1.0, xy_sigma=3.0, use_fa=False ) T = [crop.convert("RGB") for crop in target_crops] source_crops, source_orig_images, source_quads, source_inv_transforms = crop_and_align_face( source_files, image_size=1024, scale=1.0, center_sigma=0, xy_sigma=0, use_fa=False ) S = source_crops[0].convert("RGB") T_ori = target_orig_images T_inv_trans = target_inv_transforms else: T = [Image.open(t).convert("RGB").resize((1024, 1024)) for t in t_paths] S = Image.open(s_paths[0]).convert("RGB").resize((1024, 1024)) T_ori = T T_inv_trans = None return { "targets_crop": T, "source_crop": S, "targets_ori": T_ori, "targets_inv_trans": T_inv_trans } def _process_face_parsing(self, targets, source, drivens): self._load_face_parsing_model() face_parsing_model = self.face_parsing_model["model"] print("[FaceSwapVideoPipeline] face parsing...") T_mask = [faceParsing_demo(face_parsing_model, frm, convert_to_seg12=True) for frm in targets] # 12 S_mask = faceParsing_demo(face_parsing_model, source, convert_to_seg12=True) D_mask = [faceParsing_demo(face_parsing_model, d, convert_to_seg12=True) for d in drivens] save_img_dir = os.path.join(self.out_dir, "imgs") save_mask_dir = os.path.join(self.out_dir, "mask") os.makedirs(save_img_dir, exist_ok=True) os.makedirs(save_mask_dir, exist_ok=True) for i in range(len(T_mask)): targets[i].save(os.path.join(save_img_dir, "T_%04d.png" % i)) Image.fromarray(T_mask[i]).save(os.path.join(save_mask_dir, "T_mask_%04d.png" % i)) Image.fromarray(D_mask[i]).save(os.path.join(save_mask_dir, "D_mask_%04d.png" % i)) D_mask_vis = vis_parsing_maps(drivens[i], D_mask[i]) Image.fromarray(D_mask_vis).save(os.path.join(save_mask_dir, "D_mask_vis_%04d.png" % i)) Image.fromarray(S_mask).save(os.path.join(save_mask_dir, "S_mask.png")) return { "targets_mask": T_mask, "source_mask": S_mask, "drivens_mask": D_mask, } def _process_face_reenact(self, targets, source, use_recolor: bool = False): self._load_face_reenact_model() generator = self.face_reenact_model["generator"] kp_detector = self.face_reenact_model["kp_detector"] he_estimator = 
self.face_reenact_model["he_estimator"] estimate_jacobian = self.face_reenact_model["estimate_jacobian"] print("[FaceSwapVideoPipeline] face reenacting...") targets_256 = [resize(np.array(im) / 255.0, (256, 256)) for im in targets] source_256 = resize(np.array(source) / 255.0, (256, 256)) predictions = drive_source_demo(source_256, targets_256, generator, kp_detector, he_estimator, estimate_jacobian) predictions = [(pred * 255).astype(np.uint8) for pred in predictions] # RGB predictions = self._process_face_enhance( predictions, model_name="gpen", ) # fixed as gpen ''' color transfer before pasting back ''' predictions_recolor = [None] * len(predictions) if use_recolor: predictions_recolor = [None] * len(predictions) face_parsing_model = self._load_face_parsing_model()["model"] face_enhance_model = self._load_face_enhance_model("codeformer")["codeformer"] recolor_save_dir = os.path.join(self.out_dir, "recolor_before_rgi") os.makedirs(recolor_save_dir, exist_ok=True) face_recolor_model = self._load_face_recolor_model()["model"] mask_softer_model = self._load_mask_softer()["model"] for i in range(len(predictions)): # swapped_face_image = Image.fromarray(predictions[i]) swapped_face_image = predictions[i] swapped_face_image.save(os.path.join(recolor_save_dir, "recolor_input_%04d.png" % i)) T = targets[i].resize(swapped_face_image.size) swap_mask_19 = faceParsing_demo(face_parsing_model, swapped_face_image, convert_to_seg12=False) target_mask_19 = faceParsing_demo(face_parsing_model, T, convert_to_seg12=False) recolor: Image = face_recolor_model.infer_image( swapped_face_image, T, Image.fromarray(swap_mask_19), Image.fromarray(target_mask_19) ) recolor.save(os.path.join(recolor_save_dir, "recolor_gen_%04d.png" % i)) recolor = recolor.resize(swapped_face_image.size) recolor = face_enhance_model.infer_image(recolor) # no need to super-res? recolor = recolor.resize((512, 512)).resize(recolor.size) # resize down to avoid too high-res in video recolor.save(os.path.join(recolor_save_dir, "gen_enhance_%04d.png" % i)) # only copy low-frequency parts # blending_mask = get_facial_mask_from_seg19( # torch.LongTensor(swap_mask_19[None, None, :, :]), # target_size=recolor.size, edge_softer=mask_softer_model, is_seg19=True # ) # edge = get_edge(swapped_face_image) # edge = np.array(edge).astype(np.float32) / 255. # blending_mask = (blending_mask - edge).clip(0., 1.) 
# Image.fromarray((blending_mask.squeeze() * 255.).astype(np.uint8)).save( # os.path.join(recolor_save_dir, "blend_mask_%04d.png" % i) # ) # recolor = blending_two_images_with_mask( # swapped_face_image, recolor, up_ratio=0.95, up_mask=blending_mask.copy() # ) # recolor.save(os.path.join(recolor_save_dir, "recolor_blend_%04d.png" % i)) predictions_recolor[i] = np.array(recolor) # RGB imgs_save_dir = os.path.join(self.out_dir, "imgs") os.makedirs(imgs_save_dir, exist_ok=True) for i in range(len(predictions_recolor)): Image.fromarray(predictions_recolor[i]).save( os.path.join(imgs_save_dir, "%s%04d.png" % ("D_recolor_", i))) ''' end ''' self._free_face_reenact_model() return predictions, predictions_recolor def _process_face_enhance(self, lq_images: list, model_name: str = "gpen", save_prefix: str = "D_", ): self._load_face_enhance_model(model_name) enhance_model = self.face_enhance_model[model_name] print("[FaceSwapVideoPipeline] face enhancing...") hq_images = [enhance_model.infer_image(Image.fromarray(lq)) for lq in lq_images] save_dir = os.path.join(self.out_dir, "imgs") os.makedirs(save_dir, exist_ok=True) for i in range(len(hq_images)): hq_images[i].save(os.path.join(save_dir, "%s%04d.png" % (save_prefix, i))) return hq_images @torch.no_grad() def _process_extract_init_style_vectors(self, drivens, targets, drivens_mask, targets_mask): save_dir = os.path.join(self.out_dir, "styleVec") os.makedirs(save_dir, exist_ok=True) net = self._load_e4s_model() for i, (d, t) in enumerate(zip(drivens, targets)): driven = transforms.Compose([TO_TENSOR, NORMALIZE])(d) driven = driven.to(self.device).float().unsqueeze(0) driven_mask = transforms.Compose([TO_TENSOR])(Image.fromarray(drivens_mask[i])) driven_mask = (driven_mask * 255).long().to(self.device).unsqueeze(0) driven_onehot = torch_utils.labelMap2OneHot(driven_mask, num_cls=self.num_seg_cls) target = transforms.Compose([TO_TENSOR, NORMALIZE])(t) target = target.to(self.device).float().unsqueeze(0) target_mask = transforms.Compose([TO_TENSOR])(Image.fromarray(targets_mask[i])) target_mask = (target_mask * 255).long().to(self.device).unsqueeze(0) target_onehot = torch_utils.labelMap2OneHot(target_mask, num_cls=self.num_seg_cls) driven_style_vector, _ = net.get_style_vectors(driven, driven_onehot) torch.save(driven_style_vector, os.path.join(save_dir, "D_style_vec_%04d.pt" % i)) target_style_vector, _ = net.get_style_vectors(target, target_onehot) torch.save(target_style_vector, os.path.join(save_dir, "T_style_vec_%04d.pt" % i)) def _process_pti_tuning(self, pti_resume_weight_path: str = "./video_outputs/finetuned_G_lr0.001000_iters80.pth", target_frames_cnt: int = -1, ): opts = self.e4s_opt pti_steps = opts.max_pti_steps if pti_steps > 0: # needs PTI finetune_coach = VideoSwapPTICoach( opts, e4s_net=self._load_e4s_model(), num_targets=target_frames_cnt, erode=True) finetune_coach.train() # save tuned weights save_dict = finetune_coach.get_save_dict() self.pti_save_fn = "PTI_G_lr%f_iters%d.pth" % (opts.pti_learning_rate, pti_steps) save_path = os.path.join(opts.exp_dir, self.pti_save_fn) torch.save(save_dict, save_path) net = finetune_coach.net print(f"[FaceSwapVideoPipeline] PTI training finished, model saved to: {save_path}") else: # load PTI tuned weights if not os.path.exists(pti_resume_weight_path): print(f"Tuned PTI weights not found! 
Load the latest tuned PTI weight: ({self.pti_save_fn})") pti_resume_weight_path = os.path.join(opts.exp_dir, self.pti_save_fn) net = self._load_e4s_model() pti_tuned_weights = torch.load(pti_resume_weight_path) net.latent_avg = pti_tuned_weights['latent_avg'].to(opts.device) net.load_state_dict(torch_utils.remove_module_prefix(pti_tuned_weights["state_dict"], prefix="module.")) print(f"[FaceSwapVideoPipeline] Load pre-trained PTI weights from: {pti_resume_weight_path}") self.e4s_model = net return def _process_face_swapping(self, target_frames_cnt: int = 120, targets_inv_trans: list = None, targets_ori: list = None, ): out_dir = self.out_dir opts = self.e4s_opt swap_save_dir = os.path.join(self.out_dir, "swapped") # paste back os.makedirs(swap_save_dir, exist_ok=True) net = self.e4s_model swaps_face = [] swaps_mask = [] for i in tqdm(range(target_frames_cnt), desc="Swapping"): D = Image.open(os.path.join(opts.exp_dir, "imgs", "D_%04d.png" % i)).convert( "RGB").resize((1024, 1024)) T = Image.open(os.path.join(opts.exp_dir, "imgs", "T_%04d.png" % i)).convert( "RGB").resize((1024, 1024)) D_mask = np.array( Image.open(os.path.join(opts.exp_dir, "mask", "D_mask_%04d.png" % i))) T_mask = np.array( Image.open(os.path.join(opts.exp_dir, "mask", "T_mask_%04d.png" % i))) # T_mask, _ = erode_mask(T_mask, T, radius=1, verbose=True) # swapped_msk, hole_map, eyebrows_line = swap_head_mask_revisit(D_mask, T_mask) # swap the whole head swapped_msk, hole_mask, hole_map, eye_line = swap_head_mask_hole_first(D_mask, T_mask) cv2.imwrite(os.path.join(out_dir, "mask", "swappedMask_%04d.png" % i), swapped_msk) swaps_mask.append(swapped_msk) swappped_one_hot = torch_utils.labelMap2OneHot( torch.from_numpy(swapped_msk).unsqueeze(0).unsqueeze(0).long(), num_cls=12).to(opts.device) # torch_utils.tensor2map(swappped_one_hot[0]).save(os.path.join(opts.exp_dir,"swappedMaskVis.png")) # keep the background and hair from target_style_vectors, and take everything else from driven_style_vectors D_style_vector = torch.load( os.path.join(out_dir, "styleVec", "D_style_vec_%04d.pt" % i)).to( opts.device).float() T_style_vector = torch.load( os.path.join(out_dir, "styleVec", "T_style_vec_%04d.pt" % i)).to( opts.device).float() comp_indices = set(range(opts.num_seg_cls)) - {0, 4, 11} # 9 mouth swapped_style_vectors = swap_comp_style_vector(T_style_vector, D_style_vector, list(comp_indices), belowFace_interpolation=False) with torch.no_grad(): swapped_style_codes = net.cal_style_codes(swapped_style_vectors) swapped_face, _, structure_feats = net.gen_img(torch.zeros(1, 512, 32, 32).to(opts.device), swapped_style_codes, swappped_one_hot) # in [-1,1] ''' save images ''' swapped_face_image = torch_utils.tensor2im(swapped_face) swapped_face_image = swapped_face_image.resize((512, 512)).resize((1024, 1024)) swapped_m = transforms.Compose([TO_TENSOR])(swapped_msk) swapped_m = (swapped_m * 255).long().to(opts.device).unsqueeze(0) swapped_face_image.save(os.path.join(swap_save_dir, "pti_gen_%04d.png" % i)) swaps_face.append(swapped_face_image) outer_dilation = 5 # this value can be tuned mask_bg = logical_or_reduce(*[swapped_m == clz for clz in [0, 11, 7, 4, 8]]) # 4,8,7 # for video face swapping, consider treating the hair as part of the background as well; 11 earrings, 4 hair, 8 neck, 7 ear is_foreground = torch.logical_not(mask_bg) hole_index = hole_mask[None][None] is_foreground[hole_index[None]] = True foreground_mask = is_foreground.float() # foreground_mask = dilation(foreground_mask, torch.ones(2 * outer_dilation + 1, 2 * outer_dilation + 1, device=foreground_mask.device), engine='convolution') content_mask, border_mask, full_mask = create_masks(foreground_mask, 
operation='expansion', radius=5) # paste back content_mask = F.interpolate(content_mask, (1024, 1024), mode='bilinear', align_corners=False) content_mask = content_mask[0, 0, :, :, None].cpu().numpy() border_mask = F.interpolate(border_mask, (1024, 1024), mode='bilinear', align_corners=False) border_mask = border_mask[0, 0, :, :, None].cpu().numpy() border_mask = np.repeat(border_mask, 3, axis=-1) swapped_and_pasted = swapped_face_image * content_mask + T * (1 - content_mask) swapped_and_pasted = Image.fromarray(blending(np.array(T), swapped_and_pasted, mask=border_mask)) pasted_image = swapped_and_pasted if targets_inv_trans is None: # op1. directly paste pasted_image.save(os.path.join(swap_save_dir, "swap_face_%04d.png"%i)) else: # op2. paste back swapped_and_pasted = swapped_and_pasted.convert('RGBA') pasted_image = targets_ori[i].convert('RGBA') swapped_and_pasted.putalpha(255) projected = swapped_and_pasted.transform(targets_ori[i].size, Image.PERSPECTIVE, targets_inv_trans[i], Image.BILINEAR) pasted_image.alpha_composite(projected) pasted_image.save(os.path.join(swap_save_dir, "swap_face_%04d.png" % i)) return { "swaps_face": swaps_face, "swaps_mask": swaps_mask, } def _prepare_outputs(self, result_video_fn: str, target_video_path: str): out_dir = self.out_dir swap_save_dir = os.path.join(self.out_dir, "swapped") save_frames_as_video( frames=swap_save_dir, video_save_dir=out_dir, video_save_fn=result_video_fn, frame_template="swap_face_%04d.png", audio_from=target_video_path, delete_tmp_frames=False, ) return def _load_face_reenact_model(self): if len(self.face_reenact_model.items()) > 0: return self.face_reenact_model face_vid2vid_cfg = "./pretrained/faceVid2Vid/vox-256.yaml" face_vid2vid_ckpt = "./pretrained/faceVid2Vid/00000189-checkpoint.pth.tar" generator, kp_detector, he_estimator, estimate_jacobian = init_facevid2vid_pretrained_model( face_vid2vid_cfg, face_vid2vid_ckpt ) self.face_reenact_model["generator"] = generator self.face_reenact_model["kp_detector"] = kp_detector self.face_reenact_model["he_estimator"] = he_estimator self.face_reenact_model["estimate_jacobian"] = estimate_jacobian print("[FaceSwapVideoPipeline] Face reenactment model loaded.") return self.face_reenact_model def _free_face_reenact_model(self): keys = self.face_reenact_model.keys() for k in tuple(keys): del self.face_reenact_model[k] print("[FaceSwapVideoPipeline] Face reenactment model free memory.") self.face_reenact_model = {} def _load_face_parsing_model(self): if len(self.face_parsing_model.items()) > 0: return self.face_parsing_model face_parsing_ckpt = "./pretrained/faceseg/79999_iter.pth" self.face_parsing_model["model"] = init_faceParsing_pretrained_model(face_parsing_ckpt) print("[FaceSwapVideoPipeline] Face parsing model loaded.") return self.face_parsing_model def _load_face_enhance_model(self, name: str = "gpen"): if self.face_enhance_model.get(name) is not None: return self.face_enhance_model if name == "gpen": self.face_enhance_model["gpen"] = GPENInfer() elif name == "codeformer": self.face_enhance_model["codeformer"] = CodeFormerInfer() elif name == "realesr": self.face_enhance_model["realesr"] = RealESRBatchInfer() else: raise KeyError(f"Not supported face enhancement model: {name}") print(f"[FaceSwapVideoPipeline] Face enhancing model loaded: {name}") return self.face_enhance_model def _load_e4s_model(self): if not self.e4s_model is None: return self.e4s_model opts = self.e4s_opt
net = Net3(opts)
2
2023-10-15 12:15:01+00:00
24k
sotopia-lab/sotopia
sotopia/server.py
[ { "identifier": "Agents", "path": "sotopia/agents/llm_agent.py", "snippet": "class Agents(dict[str, BaseAgent[Observation, AgentAction]]):\n def reset(self) -> None:\n for agent in self.values():\n agent.reset()\n\n def act(self, obs: dict[str, Observation]) -> dict[str, AgentAction]:\n return {\n agent_name: agent.act(obs[agent_name])\n for agent_name, agent in self.items()\n }" }, { "identifier": "HumanAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class HumanAgent(BaseAgent[Observation, AgentAction]):\n \"\"\"\n A human agent that takes input from the command line.\n \"\"\"\n\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n\n @property\n def goal(self) -> str:\n if self._goal is not None:\n return self._goal\n goal = input(\"Goal: \")\n return goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n print(\"Available actions:\")\n for i, action in enumerate(obs.available_actions):\n print(f\"{i}: {action}\")\n\n action_type = obs.available_actions[int(input(\"Action type: \"))]\n argument = input(\"Argument: \")\n\n return AgentAction(action_type=action_type, argument=argument)\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n print(\"Available actions:\")\n for i, action in enumerate(obs.available_actions):\n print(f\"{i}: {action}\")\n\n if obs.available_actions != [\"none\"]:\n action_type_number = await ainput(\n \"Action type (Please only input the number): \"\n )\n try:\n action_type_number = int(action_type_number) # type: ignore\n except:\n print(\"Please input a number.\")\n action_type_number = await ainput(\n \"Action type (Please only input the number): \"\n )\n action_type_number = int(action_type_number) # type: ignore\n assert isinstance(\n action_type_number, int\n ), \"Please input a number.\"\n action_type = obs.available_actions[action_type_number]\n else:\n action_type = \"none\"\n if action_type in [\"speak\", \"non-verbal communication\"]:\n argument = await ainput(\"Argument: \")\n else:\n argument = \"\"\n\n return AgentAction(action_type=action_type, argument=argument)" }, { "identifier": "LLMAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class LLMAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n script_like: bool = False,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n self.model_name = model_name\n self.script_like = script_like\n\n @property\n def goal(self) -> str:\n if self._goal is not None:\n return self._goal\n assert (\n len(self.inbox) > 0\n ), \"attribute goal has to be called after at least one step\"\n goal = generate_goal(\n self.model_name,\n background=self.inbox[0][\n 1\n ].to_natural_language(), # Only consider the first message for now\n )\n return goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(\n self,\n obs: Observation,\n gen_func: Callable[..., AgentAction] = generate_action,\n ) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if 
len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action = gen_func(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n )\n return action\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action, prompt = await agenerate_action(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n script_like=self.script_like,\n )\n return action" }, { "identifier": "ScriptWritingAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class ScriptWritingAgent(LLMAgent):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n agent_names: list[str] = [],\n background: ScriptBackground | None = None,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n self.model_name = model_name\n self.agent_names = agent_names\n assert background is not None, \"background cannot be None\"\n self.background = background\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n message_to_compose = [\n y for idx, (x, y) in enumerate(self.inbox) if idx != 0\n ]\n\n history = \"\\n\".join(\n f\"{y.to_natural_language()}\" for y in message_to_compose\n )\n print(\"Current agent: \", self.agent_name)\n print(\"Composed history: \", history)\n\n action, prompt = await agenerate_script(\n model_name=self.model_name,\n background=self.background,\n agent_names=self.agent_names,\n history=history,\n agent_name=self.agent_name,\n single_step=True,\n )\n # action: tuple[\n # list[list[tuple[str, str, Message]]], list[tuple[str, Message]]\n # ]\n returned_action = cast(AgentAction, action[1][0][1])\n print(\"Action: \", returned_action, type(returned_action))\n # print(\"Action: \", action)\n # exit(0)\n\n return returned_action" }, { "identifier": "SpeakAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class SpeakAgent(LLMAgent):\n def act(\n self,\n obs: Observation,\n gen_func: Callable[..., AgentAction] = generate_action_speak,\n ) -> AgentAction:\n return super().act(obs, gen_func=gen_func)" }, { "identifier": "RedisAgent", "path": "sotopia/agents/redis_agent.py", "snippet": "class RedisAgent(BaseAgent[Observation, AgentAction]):\n \"\"\"An agent use redis as a message broker.\"\"\"\n\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n session_id: str | None = None,\n agent_profile: AgentProfile | None = None,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n # super().__init__(agent_name=agent_name, uuid_str=uuid_str)\n self.session_id = session_id or str(uuid4())\n self.sender_id = str(uuid4())\n print(f\"session id: {self.session_id}\")\n print(\"step 1: connect to the server\")\n assert (\n \"FASTAPI_URL\" in os.environ\n ), \"To use redis agent, you have to launch a FastAPI 
server and set FASTAPI_URL\"\n self._URL = os.environ[\"FASTAPI_URL\"]\n response = requests.request(\n \"POST\",\n f\"{self._URL}/connect/{self.session_id}/server/{self.sender_id}\",\n )\n assert (\n response.status_code == 200 and response.text == \"[]\"\n ), \"Failed to connect to the server\"\n logging.info(f\"Session ID: {self.session_id}\")\n # logging.info(f\"Sender ID: {self.sender_id}\")\n\n def act(\n self,\n obs: Observation,\n ) -> AgentAction:\n raise NotImplementedError\n\n async def aact(\n self,\n obs: Observation,\n ) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n if obs.turn_number == 0:\n async with aiohttp.ClientSession() as session:\n print(\"step 2: post observation to the message list\")\n response = await session.request(\n \"POST\",\n f\"{self._URL}/send/{self.session_id}/{self.sender_id}\",\n data=obs.to_natural_language(),\n )\n assert response.status == 200, response\n sorted_message_list: list[tuple[float, str, str]] = list(\n map(\n lambda x: MessageTransaction.parse_obj(\n x\n ).to_tuple(),\n await response.json(),\n )\n )\n last_timestamp = sorted_message_list[-1][0]\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n async with aiohttp.ClientSession() as session:\n # 1. post observation to the message list\n response = await session.request(\n \"POST\",\n f\"{self._URL}/send/{self.session_id}/{self.sender_id}\",\n data=obs.to_natural_language(),\n )\n assert response.status == 200, response\n sorted_message_list = list(\n map(\n lambda x: MessageTransaction.parse_obj(x).to_tuple(),\n await response.json(),\n )\n )\n last_timestamp = sorted_message_list[-1][0]\n\n print(\"step 2: unlock the server for the client\")\n # 2. unlock the server for the client\n response = await session.request(\n \"PUT\",\n f\"{self._URL}/lock/{self.session_id}/{self.sender_id}/action\",\n )\n assert response.status == 200, response\n\n print(\"step 3: wait for the client to post their message\")\n # 3. wait for the client to post their message\n for _ in range(300):\n response = await session.request(\n \"GET\",\n f\"{self._URL}/get/{self.session_id}\",\n )\n # print(f\"get response: {response}\")\n assert response.status == 200, response\n sorted_message_list = list(\n map(\n lambda x: MessageTransaction.parse_obj(\n x\n ).to_tuple(),\n await response.json(),\n )\n )\n if (\n sorted_message_list[-1][0] > last_timestamp\n and sorted_message_list[-1][1] == \"client\"\n ):\n # 3.a if the client has posted their message, lock the server for the client\n response = await session.request(\n \"PUT\",\n f\"{self._URL}/lock/{self.session_id}/{self.sender_id}/no%20action\",\n )\n assert response.status == 200, response\n break\n else:\n # 3.b if the client has not posted their message, wait for 0.1 second and retry\n await asyncio.sleep(1)\n else:\n response = await session.request(\n \"PUT\",\n f\"{self._URL}/lock/{self.session_id}/{self.sender_id}/no%20action\",\n )\n self.reset(\n \"Someone has left or the conversation is too long.\"\n )\n return AgentAction(action_type=\"leave\", argument=\"\")\n action_string = sorted_message_list[-1][2]\n try:\n action = AgentAction.parse_raw(action_string)\n return action\n except pydantic.error_wrappers.ValidationError:\n logging.warn(\n \"Failed to parse action string {}. 
Fall back to speak\".format(\n action_string\n )\n )\n return AgentAction(\n action_type=\"speak\", argument=sorted_message_list[-1][2]\n )\n\n def reset(\n self,\n reset_reason: str = \"\",\n ) -> None:\n super().reset()\n try:\n if reset_reason != \"\":\n response = requests.request(\n \"POST\",\n f\"{self._URL}/send/{self.session_id}/{self.sender_id}\",\n json=reset_reason,\n )\n assert response.status_code == 200\n\n except Exception as e:\n logging.error(f\"Failed to reset RedisAgent {self.sender_id}: {e}\")" }, { "identifier": "BaseAgent", "path": "sotopia/agents/base_agent.py", "snippet": "class BaseAgent(Generic[ObsType, ActType], MessengerMixin):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n ) -> None:\n MessengerMixin.__init__(self)\n if agent_profile is not None:\n self.profile = agent_profile\n self.agent_name = (\n self.profile.first_name + \" \" + self.profile.last_name\n )\n elif uuid_str is not None:\n # try retrieving profile from database\n try:\n self.profile = AgentProfile.get(pk=uuid_str)\n except NotFoundError:\n raise ValueError(\n f\"Agent with uuid {uuid_str} not found in database\"\n )\n self.agent_name = (\n self.profile.first_name + \" \" + self.profile.last_name\n )\n else:\n assert (\n agent_name is not None\n ), \"Either agent_name or uuid_str must be provided\"\n self.agent_name = agent_name\n\n self._goal: str | None = None\n\n @property\n def goal(self) -> str:\n assert (\n self._goal is not None\n ), \"attribute goal has to be set before use\"\n return self._goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(self, obs: ObsType) -> ActType:\n raise NotImplementedError\n\n async def aact(self, obs: ObsType) -> ActType:\n raise NotImplementedError\n\n def reset(self) -> None:\n self.reset_inbox()" }, { "identifier": "EpisodeLog", "path": "sotopia/database/logs.py", "snippet": "class EpisodeLog(JsonModel):\n # Note that we did not validate the following constraints:\n # 1. The number of turns in messages and rewards should be the same or off by 1\n # 2. 
The agents in the messages are the same as the agetns\n\n environment: str = Field(index=True)\n agents: list[str] = Field(index=True)\n tag: str | None = Field(index=True)\n models: list[str] | None = Field(index=True)\n messages: list[list[tuple[str, str, str]]] # Messages arranged by turn\n reasoning: str\n rewards: list[\n tuple[float, dict[str, float]] | float\n ] # Rewards arranged by turn\n rewards_prompt: str\n\n @root_validator\n def agent_number_message_number_reward_number_turn_number_match(\n cls, values: Any\n ) -> Any:\n agents, _, reasoning, rewards = (\n values.get(\"agents\"),\n values.get(\"messages\"),\n values.get(\"reasoning\"),\n values.get(\"rewards\"),\n )\n agent_number = len(agents)\n\n assert (\n len(rewards) == agent_number\n ), f\"Number of agents in rewards {len(rewards)} and agents {agent_number} do not match\"\n return values\n\n def render_for_humans(self) -> tuple[list[AgentProfile], list[str]]:\n \"\"\"Generate a human readable version of the episode log.\n\n Returns:\n A tuple of (a list of agent_profiles, a list of str): The agent profiles, and the messages and rewards in each turn.\n \"\"\"\n\n agent_profiles = [\n AgentProfile.get(pk=uuid_str) for uuid_str in self.agents\n ]\n messages_and_rewards = []\n for idx, turn in enumerate(self.messages):\n messages_in_this_turn = []\n if idx == 0:\n assert (\n len(turn) >= 2\n ), \"The first turn should have at least environemnt messages\"\n messages_in_this_turn.append(turn[0][2])\n messages_in_this_turn.append(turn[1][2])\n for sender, receiver, message in turn:\n if receiver == \"Environment\":\n if sender != \"Environment\":\n if \"did nothing\" in message:\n continue\n else:\n if \"said:\" in message:\n messages_in_this_turn.append(\n f\"{sender} {message}\"\n )\n else:\n messages_in_this_turn.append(\n f\"{sender}: {message}\"\n )\n else:\n messages_in_this_turn.append(message)\n messages_and_rewards.append(\"\\n\".join(messages_in_this_turn))\n messages_and_rewards.append(f\"The reasoning is:\\n{self.reasoning}\")\n messages_and_rewards.append(\n f\"The rewards are:\\nAgent 1: {self.rewards[0]}\\nAgent 2: {self.rewards[1]}\"\n )\n return agent_profiles, messages_and_rewards" }, { "identifier": "AgentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class AgentProfile(JsonModel):\n first_name: str = Field(index=True)\n last_name: str = Field(index=True)\n age: int = Field(index=True, default_factory=lambda: 0)\n occupation: str = Field(index=True, default_factory=lambda: \"\")\n gender: str = Field(index=True, default_factory=lambda: \"\")\n gender_pronoun: str = Field(index=True, default_factory=lambda: \"\")\n public_info: str = Field(index=True, default_factory=lambda: \"\")\n big_five: str = Field(index=True, default_factory=lambda: \"\")\n moral_values: list[str] = Field(index=False, default_factory=lambda: [])\n schwartz_personal_values: list[str] = Field(\n index=False, default_factory=lambda: []\n )\n personality_and_values: str = Field(index=True, default_factory=lambda: \"\")\n decision_making_style: str = Field(index=True, default_factory=lambda: \"\")\n secret: str = Field(default_factory=lambda: \"\")\n model_id: str = Field(default_factory=lambda: \"\")" }, { "identifier": "EnvironmentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class EnvironmentProfile(JsonModel):\n codename: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"The codename of the environment\",\n )\n source: str = Field(\n index=True,\n 
default_factory=lambda: \"\",\n description=\"The source of the environment\",\n )\n scenario: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"A concrete scenario of where the social interaction takes place, the scenario should have two agents (agent1 and agent2), and you should illustrate the relationship between the two agents, and for what purpose agent1 is interacting with agent2. Please avoid mentioning specific names and occupations in the scenario and keep all the mentions gender-neutral. Also avoid generating scenarios that requires childrend (below 18) or elderly (above 70) to be involved.\",\n )\n agent_goals: list[str] = Field(\n default_factory=lambda: [],\n description=\"The social goals of each agent, which could include <extra_info>...</extra_info>, <clarification_hint>...</clarification_hint>, and <strategy_hint>...</strategy_hint> to help the agent achieve the goal. Avoid providing too specific strategy hint, try to be as abstract as possible. For example, use 'you can provide financial benefits to achieve your goal' instead of 'you can buy him a boba tea to achieve your goal.'\",\n )\n relationship: RelationshipType = Field(\n index=True,\n default_factory=lambda: RelationshipType.stranger,\n description=\"The relationship between the two agents, choose from: stranger, know_by_name, acquaintance, friend, romantic_relationship, family_member. Do not make up a relationship, but choose from the list, 0 means stranger, 1 means know_by_name, 2 means acquaintance, 3 means friend, 4 means romantic_relationship, 5 means family_member\",\n )\n age_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The age constraint of the environment, a list of tuples, each tuple is a range of age, e.g., '[(18, 25), (30, 40)]' means the environment is only available to agent one between 18 and 25, and agent two between 30 and 40\",\n )\n occupation_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The occupation constraint of the environment, a list of lists, each list is a list of occupations, e.g., '[['student', 'teacher'], ['doctor', 'nurse']]' means the environment is only available to agent one if agent one is a student or a teacher, and agent two is a doctor or a nurse\",\n )\n agent_constraint: list[list[str]] | None = Field(\n default_factory=lambda: None,\n )" }, { "identifier": "ParallelSotopiaEnv", "path": "sotopia/envs/parallel.py", "snippet": "class ParallelSotopiaEnv(\n ParallelEnv[str, Observation, AgentAction], MessengerMixin\n):\n def __init__(\n self,\n available_action_types: set[ActionType] = set(\n [\"none\", \"speak\", \"non-verbal communication\", \"action\", \"leave\"]\n ),\n action_order: Literal[\n \"simutaneous\", \"round-robin\", \"random\"\n ] = \"simutaneous\",\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n evaluators: list[Evaluator] = [],\n terminal_evaluators: list[Evaluator] = [],\n uuid_str: str | None = None,\n env_profile: EnvironmentProfile | None = None,\n ) -> None:\n \"\"\"A sotopia environment for parallel agents.\n\n Args:\n available_action_types (set[ActionType], optional): The action types that are available to the agents. Defaults to set([\"none\", \"speak\", \"non-verbal communication\", \"action\"]).\n action_order (Literal[\"simutaneous\", \"round-robin\", \"random\"], optional): The order in which the agents take actions. Defaults to \"simutaneous\".\n model_name (LLM_Name, optional): The name of the language model to use. 
Defaults to \"gpt-3.5-turbo\".\n \"\"\"\n super().__init__()\n self.model_name = model_name\n self.background = ScriptBackground(\n scenario=\"\",\n p1_background=\"\",\n p2_background=\"\",\n p1_goal=\"\",\n p2_goal=\"\",\n p1_name=\"\",\n p2_name=\"\",\n )\n\n self.agents = []\n self.action_spaces = {}\n self.available_action_types = list(available_action_types)\n self.action_order = action_order\n self.action_mask: list[bool] = []\n self.evaluators = evaluators\n self.terminal_evaluators = terminal_evaluators\n\n # if an environment profile is provided, use it\n assert (\n env_profile or uuid_str\n ), \"Either env_profile or uuid_str must be provided\"\n if env_profile is not None:\n self.profile = env_profile\n # if a uuid is provided, try to load the environment profile from the database\n elif uuid_str is not None:\n # try retrieving profile from database\n try:\n self.profile = EnvironmentProfile.get(pk=uuid_str)\n except NotFoundError:\n raise ValueError(\n f\"Agent with uuid {uuid_str} not found in database\"\n )\n\n @configurable\n def reset(\n self,\n seed: int | None = None,\n options: dict[str, str] | None = None,\n agents: Agents | None = None,\n omniscient: bool = False,\n lite: bool = False,\n ) -> dict[str, Observation]:\n \"\"\"Starting a new episode. Must be called before step().\n\n Args:\n seed (int, optional): Seed for the environment. Defaults to None. Not used right now.\n options (dict, optional): Options for the environment. Defaults to None.\n \"partial_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound can be incompleted (\"unknown\" for missing parts), and the missing parts will be filled in by the environment.\n \"full_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound must be completed (no \"unknown\" for missing parts).\n omniscient (bool, optional): Whether the agents know the other agent's goal. 
Defaults to False.\n \"\"\"\n super().__init__()\n MessengerMixin.reset_inbox(self)\n assert (\n not options\n or not (\"partial_background_file\" in options)\n and not (\"full_background_file\" in options)\n ), \"partial_background_file and full_background_file are not supported anymore\"\n if agents is not None:\n assert agents, \"agents must be provided\"\n assert len(agents) == 2, \"Only supporting two agents right now\"\n agent_names = list(agents.keys())\n agent_goals = self.profile.agent_goals\n assert (\n len(agent_goals) == 2\n ), \"Only supporting two agents right now\"\n\n raw_background = ScriptBackground(\n scenario=self.profile.scenario,\n p1_background=get_bio(\n self.profile.relationship,\n agents[agent_names[0]].profile,\n agent_id=0,\n ),\n p2_background=get_bio(\n self.profile.relationship,\n agents[agent_names[1]].profile,\n agent_id=1,\n ),\n p1_goal=f\"<root viewer='agent_0'>{agent_goals[0]}</root>\",\n p2_goal=f\"<root viewer='agent_1'>{agent_goals[1]}</root>\",\n p1_name=agent_names[0],\n p2_name=agent_names[1],\n )\n\n if lite:\n raw_background.p1_background = \"\"\n raw_background.p2_background = \"\"\n\n self.background = ScriptBackground(\n scenario=render_text_for_environment(raw_background.scenario),\n p1_background=render_text_for_environment(\n raw_background.p1_background\n ),\n p2_background=render_text_for_environment(\n raw_background.p2_background\n ),\n p1_goal=render_text_for_environment(raw_background.p1_goal),\n p2_goal=render_text_for_environment(raw_background.p2_goal),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n else:\n raise ValueError(\"agents must be provided\")\n\n self.agents = [self.background.p1_name, self.background.p2_name]\n agent_backgrounds: list[ScriptBackground] = []\n if omniscient:\n for i in range(self.num_agents):\n agent_backgrounds.append(copy.deepcopy(self.background))\n else:\n for i in range(self.num_agents):\n agent_backgrounds.append(\n ScriptBackground(\n scenario=render_text_for_agent(\n raw_background.scenario, i\n ),\n p1_background=render_text_for_agent(\n raw_background.p1_background, i\n ),\n p2_background=render_text_for_agent(\n raw_background.p2_background, i\n ),\n p1_goal=render_text_for_agent(\n raw_background.p1_goal, i\n ),\n p2_goal=render_text_for_agent(\n raw_background.p2_goal, i\n ),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n )\n background_for_a = agent_backgrounds[0]\n background_for_b = agent_backgrounds[1]\n\n print(\"Is the agent omniscient?\", omniscient)\n if not omniscient:\n background_for_a.p2_goal = \"Unknown\"\n background_for_b.p1_goal = \"Unknown\"\n\n self.action_spaces = {\n agent: Dict(\n dict(\n action_type=Discrete(len(self.available_action_types)),\n argument=Text(256),\n )\n )\n for agent in self.agents\n }\n self.turn_number = 0\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[0] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n\n self.recv_message(\"Environment\", self.background)\n\n return {\n self.background.p1_name: Observation(\n last_turn=background_for_a.to_natural_language(),\n turn_number=0,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=background_for_b.to_natural_language(),\n turn_number=0,\n 
available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n }\n\n @beartype\n def step(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *(\n evaluator(\n turn_number=self.turn_number, messages=self.inbox\n )\n for evaluator in self.evaluators\n )\n )\n )\n )\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n },\n )\n\n @beartype\n async def astep(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n 
action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.evaluators\n ]\n )\n )\n )\n )\n\n if response.terminated:\n terminal_response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.terminal_evaluators\n ]\n )\n )\n )\n )\n # incorporate terminal response into response\n response.p1_rate = response.p1_rate or terminal_response.p1_rate\n response.p2_rate = response.p2_rate or terminal_response.p2_rate\n if response.comments and terminal_response.comments:\n response.comments += terminal_response.comments\n elif terminal_response.comments:\n response.comments = terminal_response.comments\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n info = {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n }\n if response.terminated:\n info[\"rewards_prompt\"] = {\"overall_prompt\": self.terminal_evaluators[0].prompt} # type: ignore\n\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n info,\n )\n\n def render(self, mode: str = \"human\") -> None:\n pass\n\n def close(self) -> None:\n pass" }, { "identifier": "ReachGoalLLMEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class ReachGoalLLMEvaluator(Evaluator):\n @beartype\n def __init__(\n self, model_name: LLM_Name, response_format: str = \"basic\"\n ) -> None:\n self.model_name = 
model_name\n self.prompt = \"\"\n self.response_format = response_format\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n raise NotImplementedError(\n \"ReachGoalLLMEvaluator is not implemented for synchronous evaluation\"\n )\n\n @gin.configurable\n @beartype\n async def __acall__(\n self,\n turn_number: int,\n messages: list[tuple[str, Message]] | None,\n history: str = \"\",\n temperature: float = 0.7,\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # filter did nothing\n if not history and messages:\n messages_filtered = [\n (x, y)\n for x, y in messages\n if \"did nothing\" not in y.to_natural_language()\n ]\n history = \"\\n\".join(\n [\n f\"{x} {y.to_natural_language()}\"\n if x != \"Environment\"\n else y.to_natural_language()\n for x, y in messages_filtered\n ]\n )\n response_format_class = (\n EnvResponsePlus if self.response_format == \"plus\" else EnvResponse\n )\n\n try:\n response: EnvResponsePlus | EnvResponse # fix type error from langchain 0.0.264. we don't need this line for langchain 0.0.263\n response, prompt = await agenerate(\n model_name=self.model_name,\n template=\"\"\"{history},\n Based on previous interactions, evaluate how well participants achieve their goals.\n Please following the format:\n {format_instructions}\n \"\"\",\n input_values=dict(history=history),\n output_parser=PydanticOutputParser[\n EnvResponsePlus | EnvResponse\n ](pydantic_object=response_format_class),\n temperature=temperature,\n )\n self.prompt = prompt\n response_list = []\n # TODO: multiple agents\n for dimension in response.agent_1_evaluation.dict().keys():\n response_list.append(\n (\n \"agent_1\",\n (\n (\n dimension,\n response.agent_1_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_1_evaluation.dict()[dimension][0],\n ),\n )\n )\n response_list.append(\n (\n \"agent_2\",\n (\n (\n dimension,\n response.agent_2_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_2_evaluation.dict()[dimension][0],\n ),\n )\n )\n return response_list\n except Exception as e:\n log.debug(f\"[red] Failed to generate environment response. 
{e}\")\n return []" }, { "identifier": "RuleBasedTerminatedEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class RuleBasedTerminatedEvaluator(Evaluator):\n def __init__(\n self, max_turn_number: int = 20, max_stale_turn: int = 2\n ) -> None:\n self.max_turn_number = max_turn_number\n self.max_stale_turn = max_stale_turn\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # Rule 1: If the conversation is too long, terminate the conversation\n conversation_too_long = turn_number > self.max_turn_number\n # Rule 2: If one of the players leaves, terminate the conversation\n p1_leaving = (\n len(messages) > 1\n and isinstance(messages[-2][1], AgentAction)\n and messages[-2][1].action_type == \"leave\"\n )\n p2_leaving = (\n bool(len(messages))\n and isinstance(messages[-1][1], AgentAction)\n and messages[-1][1].action_type == \"leave\"\n )\n # Rule 3: If the conversation is stale for too long, terminate the conversation\n stale_count = 0\n for message in messages[::-1]:\n if message[0] == \"Environment\":\n continue\n assert isinstance(message[1], AgentAction)\n if message[1].action_type == \"none\":\n stale_count += 1\n else:\n break\n if stale_count > self.max_stale_turn:\n break\n stale_too_long = stale_count > self.max_stale_turn\n terminated = (\n conversation_too_long or p1_leaving or p2_leaving or stale_too_long\n )\n reasons_for_termination = (\n f\"{'The conversation is too long; ' if conversation_too_long else ''}\"\n f\"{'Agent 1 is leaving; ' if p1_leaving else ''}\"\n f\"{'Agent 2 is leaving; ' if p2_leaving else ''}\"\n f\"{'The conversation stales for too long; ' if stale_too_long else ''}\"\n )\n return [\n (\n \"environment\",\n ((\"terminated\", terminated), reasons_for_termination),\n )\n ]\n\n async def __acall__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n return self(turn_number, messages)" }, { "identifier": "unweighted_aggregate_evaluate", "path": "sotopia/envs/evaluators.py", "snippet": "@beartype\ndef unweighted_aggregate_evaluate(\n responses: list[tuple[str, tuple[tuple[str, int | float | bool], str]]],\n) -> ScriptEnvironmentResponse:\n \"\"\"\n Aggregate the responses from the environment\n\n Args:\n responses (list[tuple[str, tuple[tuple[str, int | bool], str]]]): list of responses from the environment\n Each response is a tuple of (agent_name/environment, (response, reasoning))\n \"\"\"\n responses_dict: dict[\n str, list[tuple[tuple[str, int | float | bool], str]]\n ] = defaultdict(list)\n for response in responses:\n assert response[0] == \"environment\" or response[0].startswith(\"agent\")\n responses_dict[response[0]].append(response[1])\n\n environment_responses: tuple[dict[str, float | int | bool], str] = ({}, \"\")\n agent_1_responses: tuple[dict[str, float | int | bool], str] = ({}, \"\")\n agent_2_responses: tuple[dict[str, float | int | bool], str] = ({}, \"\")\n for k, v in responses_dict.items():\n if k == \"environment\":\n environment_responses = _reduce(v)\n else:\n if k == \"agent_1\":\n agent_1_responses = _reduce(v)\n elif k == \"agent_2\":\n agent_2_responses = _reduce(v)\n else:\n # TODO: supports more than two agents\n raise ValueError(f\"Only supports agent_1 and agent_2, got {k}\")\n\n comments = (\n (\n f\"Environment comments: {environment_responses[1]}\\n\"\n if environment_responses[1]\n else \"\"\n )\n + (\n f\"Agent 1 
comments:\\n{agent_1_responses[1]}\\n\"\n if agent_1_responses[1]\n else \"\"\n )\n + (\n f\"Agent 2 comments:\\n{agent_2_responses[1]}\\n\"\n if agent_2_responses[1]\n else \"\"\n )\n )\n if (\n \"terminated\" in environment_responses[0]\n and environment_responses[0][\"terminated\"]\n ):\n log.debug(f\"[green] The conversation is terminated. {response}\")\n return ScriptEnvironmentResponse(\n terminated=environment_responses[0][\"terminated\"]\n if \"terminated\" in environment_responses[0]\n else False,\n p1_rate=(\n agent_1_responses[0][\"overall_score\"]\n if \"overall_score\" in agent_1_responses[0]\n else 0,\n agent_1_responses[0],\n )\n if agent_1_responses != ({}, \"\")\n else None,\n p2_rate=(\n agent_2_responses[0][\"overall_score\"]\n if \"overall_score\" in agent_2_responses[0]\n else 0,\n agent_2_responses[0],\n )\n if agent_2_responses != ({}, \"\")\n else None,\n comments=comments,\n )" }, { "identifier": "LLM_Name", "path": "sotopia/generation_utils/generate.py", "snippet": "class EnvResponse(BaseModel):\nclass EnvResponsePydanticOutputParser(PydanticOutputParser[EnvResponse]):\nclass ListOfIntOutputParser(BaseOutputParser[list[int]]):\nclass ListOfStrOutputParser(BaseOutputParser[list[str]]):\nclass StrOutputParser(BaseOutputParser[str]):\nclass ScriptOutputParser(BaseOutputParser[ScriptInteractionReturnType]):\n def __init__(self, pydantic_object: Type[BaseModel] = EnvResponse) -> None:\n def parse(self, text: str) -> EnvResponse:\n def get_format_instructions(self) -> str:\n def __init__(\n self,\n number_of_int: int | None = None,\n range_of_int: tuple[int, int] | None = None,\n ):\n def _get_description_text(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> list[int]:\n def _type(self) -> str:\n def __init__(\n self,\n number_of_str: int | None = None,\n ):\n def _get_description_text(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> list[str]:\n def _type(self) -> str:\n def __init__(self) -> None:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> str:\n def _type(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> ScriptInteractionReturnType:\n def _type(self) -> str:\ndef _return_fixed_model_version(\n model_name: Literal[\"gpt-3.5-turbo\", \"gpt-4\", \"gpt-4-turbo\"]\n) -> str:\ndef obtain_chain(\n model_name: LLM_Name,\n template: str,\n input_variables: list[str],\n temperature: float = 0.7,\n max_retries: int = 6,\n) -> LLMChain:\ndef format_bad_output_for_script(\n ill_formed_output: str,\n format_instructions: str,\n agents: list[str],\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n) -> str:\ndef format_bad_output(\n ill_formed_output: str,\n format_instructions: str,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n) -> str:\ndef generate(\n model_name: LLM_Name,\n template: str,\n input_values: dict[str, str],\n output_parser: BaseOutputParser[OutputType],\n temperature: float = 0.7,\n) -> OutputType:\nasync def agenerate(\n model_name: LLM_Name,\n template: str,\n input_values: dict[str, str],\n output_parser: BaseOutputParser[OutputType],\n temperature: float = 0.7,\n) -> tuple[OutputType, str]:\ndef generate_episode(\n model_name: LLM_Name,\n participants: str = \"Jack (a greedy person), Rose\",\n topic: str = \"lawsuit\",\n extra_info: str = \"\",\n) -> EnvResponse:\nasync def agenerate_env_profile(\n model_name: LLM_Name,\n inspiration_prompt: str = \"asking my boyfriend to stop being friends with his ex\",\n 
examples: str = \"\",\n temperature: float = 0.7,\n) -> tuple[EnvironmentProfile, str]:\nasync def agenerate_relationship_profile(\n model_name: LLM_Name,\n agents_profiles: list[str],\n) -> tuple[RelationshipProfile, str]:\nasync def agenerate_enviroment_profile(\n model_name: LLM_Name,\n inspiration_prompt: str = \"asking my boyfriend to stop being friends with his ex\",\n examples: str = \"\",\n) -> tuple[EnvironmentProfile, str]:\ndef fill_in_background(\n model_name: LLM_Name,\n partial_background: ScriptBackground,\n) -> ScriptBackground:\ndef generate_action(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n) -> AgentAction:\ndef generate_action_speak(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n) -> AgentAction:\nasync def agenerate_action(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n temperature: float = 0.7,\n script_like: bool = False,\n) -> tuple[AgentAction, str]:\nasync def agenerate_script(\n model_name: LLM_Name,\n background: ScriptBackground,\n temperature: float = 0.7,\n agent_names: list[str] = [],\n agent_name: str = \"\",\n history: str = \"\",\n single_step: bool = False,\n) -> tuple[ScriptInteractionReturnType, str]:\ndef process_history(\n script: ScriptBackground | EnvResponse | dict[str, AgentAction]\n) -> str:\ndef generate_init_profile(\n model_name: LLM_Name, basic_info: dict[str, str]\n) -> str:\ndef convert_narratives(model_name: LLM_Name, narrative: str, text: str) -> str:\ndef generate_goal(model_name: LLM_Name, background: str) -> str:" }, { "identifier": "AgentAction", "path": "sotopia/messages/message_classes.py", "snippet": "class AgentAction(Message):\n action_type: ActionType = Field(\n description=\"whether to speak at this turn or choose to not do anything\"\n )\n argument: str = Field(\n description=\"the utterance if choose to speak, the expression or gesture if choose non-verbal communication, or the physical action if choose action\"\n )\n\n def to_natural_language(self) -> str:\n match self.action_type:\n case \"none\":\n return \"did nothing\"\n case \"speak\":\n return f'said: \"{self.argument}\"'\n case \"non-verbal communication\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"action\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"leave\":\n return \"left the conversation\"" }, { "identifier": "Message", "path": "sotopia/messages/message_classes.py", "snippet": "class Message(BaseModel):\n \"\"\"\n An interface for messages.\n There is only one required method: to_natural_language\n \"\"\"\n\n def to_natural_language(self) -> str:\n raise NotImplementedError" }, { "identifier": "Observation", "path": "sotopia/messages/message_classes.py", "snippet": "class Observation(Message):\n last_turn: str = Field(description=\"the last turn of the conversation\")\n turn_number: int = Field(description=\"the turn number of the conversation\")\n available_actions: list[ActionType] = Field(\n description=\"the available actions\"\n )\n\n def to_natural_language(self) -> str:\n if self.turn_number == 0:\n return f\"\\n{self.last_turn}\\nConversation Starts:\\n\"\n else:\n return f\"Turn #{self.turn_number-1}: {self.last_turn}\\n\"" }, { "identifier": "ScriptBackground", "path": "sotopia/messages/message_classes.py", "snippet": "class ScriptBackground(Message):\n scenario: str = 
Field(description=\"scenario of the episode\")\n p1_name: str = Field(description=\"name of participant 1\")\n p2_name: str = Field(description=\"name of participant 2\")\n p1_background: str = Field(description=\"background of participant 1\")\n p2_background: str = Field(description=\"background of participant 2\")\n p1_goal: str = Field(description=\"goal of participant 1\")\n p2_goal: str = Field(description=\"goal of participant 2\")\n\n def to_natural_language(self) -> str:\n if self.p1_background and self.p2_background:\n return format_docstring(\n f\"\"\"Here is the context of this interaction:\n Scenario: {self.scenario}\n Participants: {self.p1_name} and {self.p2_name}\n {self.p1_name}'s background: {self.p1_background}\n {self.p2_name}'s background: {self.p2_background}\n {self.p1_name}'s goal: {self.p1_goal}\n {self.p2_name}'s goal: {self.p2_goal}\n \"\"\"\n )\n else:\n return format_docstring(\n f\"\"\"Here is the context of this interaction:\n Scenario: {self.scenario}\n Participants: {self.p1_name} and {self.p2_name}\n {self.p1_name}'s goal: {self.p1_goal}\n {self.p2_name}'s goal: {self.p2_goal}\n \"\"\"\n )" }, { "identifier": "ScriptEnvironmentResponse", "path": "sotopia/messages/message_classes.py", "snippet": "class ScriptEnvironmentResponse(Message):\n terminated: bool = Field(\n description=\"whether the conversation is terminated\",\n default_factory=lambda: False,\n )\n p1_rate: float | tuple[float, dict[str, float]] | None = Field(\n description=\"rating of participant 1, on the scale of 1 to 10\"\n )\n p2_rate: float | tuple[float, dict[str, float]] | None = Field(\n description=\"rating of participant 2, on the scale of 1 to 10\"\n )\n comments: str | None = Field(\n description=\"All of the comments supporting the termination and rating\"\n )\n\n def to_natural_language(self) -> str:\n reason_to_stop = format_docstring(\n f\"\"\"Environment response:\n {\"The conversation is terminated.\" if self.terminated else \"\"}\n {\"Rating of participant 1\" + str(self.p1_rate) if self.p1_rate is not None else \"\"}\n {\"Rating of participant 2\" + str(self.p2_rate) if self.p2_rate is not None else \"\"}\n {self.comments if self.comments is not None else \"\"}\n \"\"\"\n )\n clean_text = \"\"\n for line in reason_to_stop.split(\"\\n\"):\n if line.strip():\n clean_text += line + \"\\n\"\n return clean_text" }, { "identifier": "ScriptInteraction", "path": "sotopia/messages/message_classes.py", "snippet": "class ScriptInteraction(Message):\n interactions: str = Field(\n description=\"\"\"The interaction between the two participants in maximum 20 turns. Each turn is separated by a newline, and should only describe one agent. Following the structure:\n Turn #x\n [participant's name] [action] {argument for some actions}\n\n You can use different types of actions, but only use one in each turn. You should move other information into argument part. Below shows a python code snippet of the format for each action type:\n match self.action_type:\n case \"none\":\n return \"did nothing\"\n case \"speak\":\n return f'said: \"{self.argument}\"'\n case \"non-verbal communication\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"action\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"leave\":\n return \"left the conversation\"\n\n For example, the following is acceptable:\n Turn #x\n Oliver Thompson said: \"Hey Esmeralda, what's wrong? 
You seem upset.\"\n Turn #x\n Esmeralda Solis [action] moved closer\n Turn #x\n Oliver Thompson [non-verbal communication] smiled\n Turn #x\n Esmeralda Solis did nothing\n Turn #x\n Oliver Thompson left the conversation\n Turn #x\n Esmeralda Solis [action] leaned in and lowered her voice: \"Sorry\"\n\n And the following is not acceptable:\n Turn #1\n Oliver Thompson [speak] said: \"Hey Esmeralda, what's wrong? You seem upset.\"\n Turn #1\n Esmeralda Solis non-verbal communication moved closer\n \"\"\"\n )\n\n def to_natural_language(self) -> str:\n return self.interactions\n\n def parse(\n self, agent_names: list[str], background: str\n ) -> tuple[\n list[list[tuple[str, str, Message]]], list[tuple[str, Message]]\n ]:\n interaction = self.interactions\n # print(\"Interaction: \", interaction)\n lines = self.split_by_turn(interaction)\n\n agent_results = []\n results: list[list[tuple[str, str, Message]]] = [\n [\n (\n \"Environment\",\n name,\n Observation(\n last_turn=background,\n turn_number=0,\n available_actions=[\"none\"],\n ),\n )\n for name in agent_names\n ]\n ]\n\n for line_idx, line in enumerate(lines):\n try:\n res = self.parse_single_dialogue(line)\n action: AgentAction = cast(AgentAction, res[\"action\"])\n argument: str = cast(str, res[\"argument\"])\n turn: int = cast(int, res[\"turn\"])\n name: str = cast(str, res[\"name\"])\n\n parsed_action = AgentAction(\n action_type=action, argument=argument\n )\n if name not in agent_names:\n print(\n f\"The name of the agent, {name}, is not in the list of agent names, {agent_names}\"\n )\n name = agent_names[\n line_idx % 2\n ] # TODO Not sure what name to be set here\n except Exception as e:\n print(\n f\"Error when parsing the dialogue: {line}\",\n f\"The error is: {e}\",\n )\n raise e\n parsed_action = AgentAction(action_type=\"none\", argument=\"\")\n name = agent_names[line_idx % 2] # TODO same question as above\n inactive_agent_name = (\n agent_names[0] if name == agent_names[1] else agent_names[1]\n )\n results.append(\n [\n (\n \"Environment\",\n name,\n Observation(\n last_turn=\"environment is the agent\",\n turn_number=line_idx + 1,\n available_actions=[\"none\"],\n ),\n )\n for name in agent_names\n ]\n + [\n (name, \"Environment\", parsed_action),\n (\n inactive_agent_name,\n \"Environment\",\n AgentAction(\n action_type=\"none\", argument=\"did nothing\"\n ),\n ),\n ]\n )\n\n agent_results.append((name, parsed_action))\n # print(\"Parsed agent results: \", agent_results)\n return (results, agent_results) # type: ignore\n\n def parse_single_dialogue(\n self, dialogue: str\n ) -> dict[str, str | int | AgentAction | None]:\n \"\"\"Parse a single dialogue string and return a dictionary with turn, name, action, and argument.\"\"\"\n\n # Match the turn number and name. 
Assume all agent name starts with a capital letter and is followed by lowercase letters\n match_turn_name = re.match(\n r\"Turn #?(\\d+):?\\s*\\n((?:[A-Z]['a-z]* ?)+)\", dialogue\n )\n\n if not match_turn_name:\n raise ValueError(\n f\"The dialogue does not match the expected format: {dialogue}\"\n )\n return (\n None # TODO Which should we use, return None or raise error?\n )\n\n turn, name = match_turn_name.groups()\n action_content = dialogue[\n len(match_turn_name.group(0)) :\n ].strip() # Extract the action content\n\n # Check for different action types\n if \"did nothing\" in action_content:\n action, argument = \"none\", \"\"\n elif match := re.match(r'said: \"(.*?)\"', action_content):\n action, argument = \"speak\", match.group(1)\n action, argument = action.strip(), argument.strip()\n elif match := re.match(r'\\[speak\\] said: \"(.*?)\"', action_content):\n action, argument = \"speak\", match.group(1)\n action, argument = action.strip(), argument.strip()\n elif match := re.match(\n r\"\\[(non-verbal communication|action)\\] (.*)\", action_content\n ):\n action, argument = match.groups()\n elif \"left the conversation\" in action_content:\n # TODO Make it more elegant to handle the situation of `left the conversation.`\n action, argument = \"leave\", \"\"\n else:\n action, argument = None, None\n\n parsed_item = {\n \"turn\": int(turn),\n \"name\": name.strip(),\n \"action\": action,\n \"argument\": argument,\n }\n return parsed_item\n\n def split_by_turn(self, input_string: str) -> list[str]:\n \"\"\"Split the input dialogue string by turn and return a list of dialogues.\"\"\"\n # Split using 'Turn #' as delimiter, but keep the delimiter in the results\n dialogues = re.split(r\"(?=Turn #?\\d+)\", input_string)\n # Remove any empty strings and strip whitespace\n dialogues = [\n dialogue.strip() for dialogue in dialogues if dialogue.strip()\n ]\n dialogues = [\n dialogue for dialogue in dialogues if dialogue.startswith(\"Turn\")\n ]\n # Change from Turn #x to Turn (#)x (# is optional)\n dialogues[-1] = \"\\n\".join(\n dialogues[-1].split(\"\\n\")[:2]\n ) # Discard further input in the last turn\n # print(\"Dialogues: \", dialogues)\n return dialogues\n\n @staticmethod\n def default_value_for_return_type() -> ScriptInteractionReturnType:\n results_1: list[list[tuple[str, str, Message]]] = [\n [\n (\n \"Environment\",\n name,\n Observation(\n last_turn=\"Environment is the agent\",\n turn_number=0,\n available_actions=[\"none\"],\n ),\n )\n for name in [\"none\", \"none\"]\n ]\n ]\n results_2: list[tuple[str, Message]] = [\n (\"\", AgentAction(action_type=\"none\", argument=\"\"))\n ]\n return (results_1, results_2)" }, { "identifier": "BaseSampler", "path": "sotopia/samplers/base_sampler.py", "snippet": "class BaseSampler(Generic[ObsType, ActType]):\n def __init__(\n self,\n env_candidates: Sequence[EnvironmentProfile | str] | None = None,\n agent_candidates: Sequence[AgentProfile | str] | None = None,\n ) -> None:\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 1,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:" }, { "identifier": "ConstraintBasedSampler", "path": "sotopia/samplers/constraint_based_sampler.py", "snippet": "class ConstraintBasedSampler(BaseSampler[ObsType, ActType]):\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, 
ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 10,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:\n \"\"\"\n Sample an environment and a list of agents based on the constraints of the environment.\n\n Note: Sampling without replacement is only restricted to single env candidate.\n This is due to the fact that the number of possible combinations of env and agents is huge.\n Please sample for each env separately if you want to sample without replacement.\n \"\"\"\n assert (\n not isinstance(agent_classes, list)\n or len(agent_classes) == n_agent\n ), f\"agent_classes should be a list of length {n_agent} or a single agent class\"\n\n if not isinstance(agent_classes, list):\n agent_classes = [agent_classes] * n_agent\n assert (\n len(agents_params) == n_agent\n ), f\"agents_params should be a list of length {n_agent}\"\n\n env_profiles: list[EnvironmentProfile] = []\n agents_which_fit_scenario: list[list[str]] = []\n\n agent_candidate_ids: set[str] | None = None\n if self.agent_candidates:\n agent_candidate_ids = set(\n str(agent.pk) if not isinstance(agent, str) else agent\n for agent in self.agent_candidates\n )\n else:\n agent_candidate_ids = None\n\n if not replacement:\n assert self.env_candidates and len(self.env_candidates) == 1, (\n \"Sampling without replacement is only restricted to single env candidate (must be provided in the constructor). \"\n \"This is due to the fact that the number of possible combinations of env and agents is huge. \"\n \"Please sample for each env separately if you want to sample without replacement.\"\n )\n\n env_profile_id = (\n self.env_candidates[0].pk\n if not isinstance(self.env_candidates[0], str)\n else self.env_candidates[0]\n )\n\n assert env_profile_id, \"Env candidate must have an id\"\n\n agents_which_fit_scenario = _get_fit_agents_for_one_env(\n env_profile_id, agent_candidate_ids, size\n )\n env_profiles = (\n [EnvironmentProfile.get(env_profile_id)] * size\n if isinstance(self.env_candidates[0], str)\n else [self.env_candidates[0]] * size\n )\n else:\n for _ in range(size):\n if self.env_candidates:\n env_profile = random.choice(self.env_candidates)\n if isinstance(env_profile, str):\n env_profile = EnvironmentProfile.get(env_profile)\n else:\n env_profile_id = random.choice(\n list(EnvironmentProfile.all_pks())\n )\n env_profile = EnvironmentProfile.get(env_profile_id)\n env_profiles.append(env_profile)\n env_profile_id = env_profile.pk\n assert env_profile_id, \"Env candidate must have an id\"\n agents_which_fit_scenario.append(\n _get_fit_agents_for_one_env(\n env_profile_id, agent_candidate_ids, 1\n )[0]\n )\n\n assert (\n len(env_profiles) == size\n ), \"Number of env_profiles is not equal to size\"\n assert (\n len(agents_which_fit_scenario) == size\n ), \"Number of agents_which_fit_scenario is not equal to size\"\n\n for env_profile, agent_profile_id_list in zip(\n env_profiles, agents_which_fit_scenario\n ):\n env = ParallelSotopiaEnv(env_profile=env_profile, **env_params)\n agent_profiles = [\n AgentProfile.get(id) for id in agent_profile_id_list\n ]\n\n agents = [\n agent_class(agent_profile=agent_profile, **agent_params)\n for agent_class, agent_profile, agent_params in zip(\n agent_classes, agent_profiles, agents_params\n )\n ]\n # set goal for each agent\n for agent, goal in zip(agents, env.profile.agent_goals):\n agent.goal = goal\n\n yield env, agents" }, { 
"identifier": "UniformSampler", "path": "sotopia/samplers/uniform_sampler.py", "snippet": "class UniformSampler(BaseSampler[ObsType, ActType]):\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 1,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:\n \"\"\"\n Sample an environment and `n_agent` agents.\n\n Runtime checks:\n 1. If `agent_classes` is a list, it should have length `n_agent`.\n 2. `agents_params` should also be a list of length `n_agent`.\n\n Note: Currently, uniform sampling without replacement is not supported.\n This is due to the difficulty of sequentially sampling environment and agents.\n In theory, we can reject samples that have been sampled before, but this is not efficient.\n Please open an issue if you need this feature.\n \"\"\"\n assert (\n not isinstance(agent_classes, list)\n or len(agent_classes) == n_agent\n ), f\"agent_classes should be a list of length {n_agent} or a single agent class\"\n\n if not isinstance(agent_classes, list):\n agent_classes = [agent_classes] * n_agent\n assert (\n len(agents_params) == n_agent\n ), f\"agents_params should be a list of length {n_agent}\"\n\n assert (\n replacement\n ), \"Uniform sampling without replacement is not supported yet\"\n\n for _ in range(size):\n if self.env_candidates:\n env_profile = random.choice(self.env_candidates)\n if isinstance(env_profile, str):\n env_profile = EnvironmentProfile.get(env_profile)\n else:\n env_profile_id = random.choice(\n list(EnvironmentProfile.all_pks())\n )\n env_profile = EnvironmentProfile.get(env_profile_id)\n env = ParallelSotopiaEnv(env_profile=env_profile, **env_params)\n\n if self.agent_candidates:\n agent_profile_candidates = self.agent_candidates\n if len(agent_profile_candidates) < n_agent:\n raise ValueError(\n f\"Number of agent candidates ({len(agent_profile_candidates)}) is less than number of agents ({n_agent})\"\n )\n else:\n agent_profile_candidates_keys = list(AgentProfile.all_pks())\n if len(agent_profile_candidates_keys) < n_agent:\n raise ValueError(\n f\"Number of agent profile candidates ({len(agent_profile_candidates_keys)}) in database is less than number of agents ({n_agent})\"\n )\n agent_profile_candidates = [\n AgentProfile.get(pk=pk)\n for pk in agent_profile_candidates_keys\n ]\n\n if len(agent_profile_candidates) == n_agent:\n agent_profiles_maybe_id = agent_profile_candidates\n else:\n agent_profiles_maybe_id = random.sample(\n agent_profile_candidates, n_agent\n )\n agent_profiles = [\n i if isinstance(i, AgentProfile) else AgentProfile.get(i)\n for i in agent_profiles_maybe_id\n ]\n agents = [\n agent_class(agent_profile=agent_profile, **agent_params)\n for agent_class, agent_profile, agent_params in zip(\n agent_classes, agent_profiles, agents_params\n )\n ]\n # set goal for each agent\n for agent, goal in zip(agents, env.profile.agent_goals):\n agent.goal = goal\n\n yield env, agents" } ]
import asyncio import functools import itertools import logging import gin import rich from typing import Callable, Literal, Sequence, Type, cast from beartype import beartype from tqdm.asyncio import tqdm_asyncio from sotopia.agents import ( Agents, HumanAgent, LLMAgent, RedisAgent, ScriptWritingAgent, SpeakAgent, ) from sotopia.agents.base_agent import BaseAgent from sotopia.database import EpisodeLog from sotopia.database.persistent_profile import ( AgentProfile, EnvironmentProfile, ) from sotopia.envs import ParallelSotopiaEnv from sotopia.envs.evaluators import ( ReachGoalLLMEvaluator, RuleBasedTerminatedEvaluator, unweighted_aggregate_evaluate, ) from sotopia.generation_utils.generate import LLM_Name, agenerate_script from sotopia.messages import AgentAction, Message, Observation from sotopia.messages.message_classes import ( ScriptBackground, ScriptEnvironmentResponse, ScriptInteraction, ) from sotopia.samplers import ( BaseSampler, ConstraintBasedSampler, EnvAgentCombo, UniformSampler, )
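The import block above pulls in the agent classes and the samplers whose snippets were retrieved for this row. All three samplers expose the same `sample(...)` generator, yielding `(env, agents)` pairs with per-agent goals already attached. The sketch below is illustrative only, not part of the record: it assumes a populated sotopia profile database, and the `model_name` keyword passed through `agents_params` is an assumption about `LLMAgent`'s constructor, which is not shown in the retrieved snippets.

# Illustrative sketch, not from the dataset row: requires sotopia's Redis-backed
# profile database to be populated. LLMAgent's model_name kwarg is assumed.
from sotopia.agents import LLMAgent
from sotopia.samplers import UniformSampler

sampler = UniformSampler()  # no candidates given: sample uniformly from the database
for env, agents in sampler.sample(
    agent_classes=LLMAgent,
    n_agent=2,
    size=1,
    agents_params=[{"model_name": "gpt-3.5-turbo"}, {"model_name": "gpt-3.5-turbo"}],
):
    # The sampler has already copied each environment goal onto the matching agent.
    print(env.profile.agent_goals, [agent.goal for agent in agents])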
18,562
@beartype def run_sync_server( model_name_dict: dict[str, LLM_Name], action_order: Literal["simutaneous", "round-robin", "random"], agents_info: dict[str, dict[str, str]] | None = None, partial_background_file: str | None = None, full_background_file: str | None = None, mode: str | None = None, ) -> list[tuple[str, str, Message]]: # Create Environment and agents # This step will be moved to outside this function env = ParallelSotopiaEnv( model_name=model_name_dict["env"], action_order=action_order, evaluators=[ RuleBasedTerminatedEvaluator(), ], ) if partial_background_file: environment_messages = env.reset( options={"partial_background_file": partial_background_file} ) elif full_background_file: environment_messages = env.reset( options={"full_background_file": full_background_file} ) else: environment_messages = env.reset() agents = Agents() agents_model_names = [model_name_dict["agent1"], model_name_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif mode == "speak": agents[agent_name] = SpeakAgent(agent_name, model_name=agent_model) else: agents[agent_name] = LLMAgent(agent_name, model_name=agent_model) agents.reset() messages: list[tuple[str, str, Message]] = [] # Main Event Loop done = False for agent_name in env.agents: messages.append( ("Environment", agent_name, environment_messages[agent_name]) ) while not done: # gather agent messages agent_messages: dict[str, AgentAction] = dict() for agent_name in env.agents: if agents_info is not None: agents[agent_name].goal = agents_info[agent_name]["goal"] agent_messages[agent_name] = agents[agent_name].act( environment_messages[agent_name] ) messages.append( (agent_name, "Environment", agent_messages[agent_name]) ) # send agent messages to environment environment_messages, _, terminated, ___, ____ = env.step( agent_messages ) for agent_name in env.agents: messages.append( ("Environment", agent_name, environment_messages[agent_name]) ) done = all(terminated.values()) return messages @gin.configurable async def arun_one_episode( env: ParallelSotopiaEnv,
@beartype def run_sync_server( model_name_dict: dict[str, LLM_Name], action_order: Literal["simutaneous", "round-robin", "random"], agents_info: dict[str, dict[str, str]] | None = None, partial_background_file: str | None = None, full_background_file: str | None = None, mode: str | None = None, ) -> list[tuple[str, str, Message]]: # Create Environment and agents # This step will be moved to outside this function env = ParallelSotopiaEnv( model_name=model_name_dict["env"], action_order=action_order, evaluators=[ RuleBasedTerminatedEvaluator(), ], ) if partial_background_file: environment_messages = env.reset( options={"partial_background_file": partial_background_file} ) elif full_background_file: environment_messages = env.reset( options={"full_background_file": full_background_file} ) else: environment_messages = env.reset() agents = Agents() agents_model_names = [model_name_dict["agent1"], model_name_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif mode == "speak": agents[agent_name] = SpeakAgent(agent_name, model_name=agent_model) else: agents[agent_name] = LLMAgent(agent_name, model_name=agent_model) agents.reset() messages: list[tuple[str, str, Message]] = [] # Main Event Loop done = False for agent_name in env.agents: messages.append( ("Environment", agent_name, environment_messages[agent_name]) ) while not done: # gather agent messages agent_messages: dict[str, AgentAction] = dict() for agent_name in env.agents: if agents_info is not None: agents[agent_name].goal = agents_info[agent_name]["goal"] agent_messages[agent_name] = agents[agent_name].act( environment_messages[agent_name] ) messages.append( (agent_name, "Environment", agent_messages[agent_name]) ) # send agent messages to environment environment_messages, _, terminated, ___, ____ = env.step( agent_messages ) for agent_name in env.agents: messages.append( ("Environment", agent_name, environment_messages[agent_name]) ) done = all(terminated.values()) return messages @gin.configurable async def arun_one_episode( env: ParallelSotopiaEnv,
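The code above defines the synchronous serving loop: it builds the environment, assigns one model (or a human) per agent, then alternates agent actions and environment steps until every agent is terminated, returning the full message log. A minimal invocation sketch follows; the import path, the model names, and the assumption of a populated sotopia profile database are all illustrative, not taken from this record.

# Illustrative invocation of run_sync_server as defined above; the import path is
# assumed, the model names are placeholders, and a populated profile database is required.
from sotopia.server import run_sync_server  # assumed import path

messages = run_sync_server(
    model_name_dict={"env": "gpt-4", "agent1": "gpt-3.5-turbo", "agent2": "gpt-3.5-turbo"},
    action_order="round-robin",
)
# Each entry is (sender, receiver, Message); the Message API shown in the context
# renders it as natural language.
for sender, receiver, message in messages:
    print(f"{sender} -> {receiver}: {message.to_natural_language()}")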
agent_list: Sequence[BaseAgent[Observation, AgentAction]],
6
2023-10-23 19:47:26+00:00
24k
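Taken together, this row pairs the retrieved context snippets and the import block with a code excerpt that stops mid-signature, and records the true continuation (`agent_list: Sequence[BaseAgent[Observation, AgentAction]],`). A minimal sketch of turning such a row into a next-line completion prompt and scoring a prediction is given below; the field names (`context`, `import_statement`, `cropped_code`, `next_line`) and the exact-match metric are assumptions for illustration, not part of the dataset definition.

# Sketch under assumptions: `row` is a dict exposing the fields named below, and each
# context item carries a "snippet" key as in the entries shown above. The scoring rule
# (exact match on the first generated line) is a placeholder, not the benchmark's metric.
def build_prompt(row: dict) -> str:
    # Concatenate retrieved context snippets, the import block, and the cropped code.
    context = "\n\n".join(item["snippet"] for item in row["context"])
    return f"{context}\n\n{row['import_statement']}\n\n{row['cropped_code']}"

def exact_match(prediction: str, row: dict) -> bool:
    # Compare only the first generated line against the recorded next line.
    lines = prediction.splitlines()
    return bool(lines) and lines[0].strip() == row["next_line"].strip()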
f0uriest/interpax
tests/test_interpolate.py
[ { "identifier": "fft_interp1d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=\"n\")\ndef fft_interp1d(f: jax.Array, n: int, sx: jax.Array = None, dx: float = 1.0):\n \"\"\"Interpolation of a 1d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray, shape(nx, ...)\n Source data. Assumed to cover 1 full period, excluding the endpoint.\n n : int\n Number of desired interpolation points.\n sx : ndarray or None\n Shift in x to evaluate at. If original data is f(x), interpolates to f(x + sx)\n dx : float\n Spacing of source points\n\n Returns\n -------\n fi : ndarray, shape(n, ..., len(sx))\n Interpolated (and possibly shifted) data points\n \"\"\"\n c = jnp.fft.ifft(f, axis=0)\n nx = c.shape[0]\n if sx is not None:\n sx = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(nx)[:, None] * sx / dx)\n c = (c[None].T * sx).T\n c = jnp.moveaxis(c, 0, -1)\n pad = ((n - nx) // 2, n - nx - (n - nx) // 2)\n if nx % 2 != 0:\n pad = pad[::-1]\n c = jnp.fft.ifftshift(_pad_along_axis(jnp.fft.fftshift(c, axes=0), pad, axis=0))\n return jnp.fft.fft(c, axis=0).real" }, { "identifier": "fft_interp2d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=(\"n1\", \"n2\"))\ndef fft_interp2d(\n f: jax.Array,\n n1: int,\n n2: int,\n sx: jax.Array = None,\n sy: jax.Array = None,\n dx: float = 1.0,\n dy: float = 1.0,\n):\n \"\"\"Interpolation of a 2d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray, shape(nx, ny, ...)\n Source data. Assumed to cover 1 full period, excluding the endpoint.\n n1, n2 : int\n Number of desired interpolation points in x and y directions\n sx, sy : ndarray or None\n Shift in x and y to evaluate at. If original data is f(x,y), interpolates to\n f(x + sx, y + sy). Both must be provided or None\n dx, dy : float\n Spacing of source points in x and y\n\n Returns\n -------\n fi : ndarray, shape(n1, n2, ..., len(sx))\n Interpolated (and possibly shifted) data points\n \"\"\"\n c = jnp.fft.ifft2(f, axes=(0, 1))\n nx, ny = c.shape[:2]\n if (sx is not None) and (sy is not None):\n sx = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(nx)[:, None] * sx / dx)\n sy = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(ny)[:, None] * sy / dy)\n c = (c[None].T * sx[None, :, :] * sy[:, None, :]).T\n c = jnp.moveaxis(c, 0, -1)\n padx = ((n1 - nx) // 2, n1 - nx - (n1 - nx) // 2)\n pady = ((n2 - ny) // 2, n2 - ny - (n2 - ny) // 2)\n if nx % 2 != 0:\n padx = padx[::-1]\n if ny % 2 != 0:\n pady = pady[::-1]\n\n c = jnp.fft.ifftshift(\n _pad_along_axis(jnp.fft.fftshift(c, axes=0), padx, axis=0), axes=0\n )\n c = jnp.fft.ifftshift(\n _pad_along_axis(jnp.fft.fftshift(c, axes=1), pady, axis=1), axes=1\n )\n\n return jnp.fft.fft2(c, axes=(0, 1)).real" }, { "identifier": "Interpolator1D", "path": "interpax/_spline.py", "snippet": "class Interpolator1D(eqx.Module):\n \"\"\"Convenience class for representing a 1D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. 
If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n - ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the\n data, and will not introduce new extrema in the interpolated points\n - ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at\n both endpoints\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as a 2 element array or tuple to specify different conditions\n for xq<x[0] and x[-1]<xq\n period : float > 0, None\n periodicity of the function. If given, function is assumed to be periodic\n on the interval [0,period]. None denotes no periodicity\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float] = None,\n **kwargs,\n ):\n x, f = map(jnp.asarray, (x, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n\n errorif(\n (len(x) != f.shape[axis]) or (jnp.ndim(x) != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_1D, ValueError, f\"unknown method {method}\")\n\n self.x = x\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, axis, **kwargs)\n\n self.derivs = {\"fx\": fx}\n\n def __call__(self, xq: jax.Array, dx: int = 0):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n Query points where interpolation is desired\n dx : int >= 0\n Derivative to take.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp1d(\n xq,\n self.x,\n self.f,\n self.method,\n dx,\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "Interpolator2D", "path": "interpax/_spline.py", "snippet": "class Interpolator2D(eqx.Module):\n \"\"\"Convenience class for representing a 2D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y directions. 
None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. Use a\n single value for the same in both directions.\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n y: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float, tuple]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n y: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n ):\n x, y, f = map(jnp.asarray, (x, y, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fxy = kwargs.pop(\"fxy\", None)\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_2D, ValueError, f\"unknown method {method}\")\n\n self.x = x\n self.y = y\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n\n self.derivs = {\"fx\": fx, \"fy\": fy, \"fxy\": fxy}\n\n def __call__(self, xq: jax.Array, yq: jax.Array, dx: int = 0, dy: int = 0):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq, yq : ndarray, shape(Nq,)\n x, y query points where interpolation is desired\n dx, dy : int >= 0\n Derivative to take in x, y directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp2d(\n xq,\n yq,\n self.x,\n self.y,\n self.f,\n self.method,\n (dx, dy),\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "Interpolator3D", "path": "interpax/_spline.py", "snippet": "class Interpolator3D(eqx.Module):\n \"\"\"Convenience class for representing a 3D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n z : ndarray, shape(Nz,)\n z coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,Nz,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y, z directions. 
None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. Use a\n single value for the same in both directions.\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n y: jax.Array\n z: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float, tuple]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n y: jax.Array,\n z: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n ):\n x, y, z, f = map(jnp.asarray, (x, y, z, f))\n axis = kwargs.get(\"axis\", 0)\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(\n (len(z) != f.shape[2]) or (z.ndim != 1),\n ValueError,\n \"z and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_3D, ValueError, f\"unknown method {method}\")\n\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fz = kwargs.pop(\"fz\", None)\n fxy = kwargs.pop(\"fxy\", None)\n fxz = kwargs.pop(\"fxz\", None)\n fyz = kwargs.pop(\"fyz\", None)\n fxyz = kwargs.pop(\"fxyz\", None)\n\n self.x = x\n self.y = y\n self.z = z\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fz is None:\n fz = approx_df(z, f, method, 2, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n if fxz is None:\n fxz = approx_df(z, fx, method, 2, **kwargs)\n if fyz is None:\n fyz = approx_df(z, fy, method, 2, **kwargs)\n if fxyz is None:\n fxyz = approx_df(z, fxy, method, 2, **kwargs)\n\n self.derivs = {\n \"fx\": fx,\n \"fy\": fy,\n \"fz\": fz,\n \"fxy\": fxy,\n \"fxz\": fxz,\n \"fyz\": fyz,\n \"fxyz\": fxyz,\n }\n\n def __call__(\n self,\n xq: jax.Array,\n yq: jax.Array,\n zq: jax.Array,\n dx: int = 0,\n dy: int = 0,\n dz: int = 0,\n ):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq, yq, zq : ndarray, shape(Nq,)\n x, y, z query points where interpolation is desired\n dx, dy, dz : int >= 0\n Derivative to take in x, y, z directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp3d(\n xq,\n yq,\n zq,\n self.x,\n self.y,\n self.z,\n self.f,\n self.method,\n (dx, dy, dz),\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "interp1d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp1d(\n xq: jax.Array,\n x: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 1d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n query points where interpolation is desired\n x : ndarray, shape(Nx,)\n coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: 
linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n - ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the\n data, and will not introduce new extrema in the interpolated points\n - ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at\n both endpoints\n\n derivative : int >= 0\n derivative order to calculate\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as a 2 element array or tuple to specify different conditions\n for xq<x[0] and x[-1]<xq\n period : float > 0, None\n periodicity of the function. If given, function is assumed to be periodic\n on the interval [0,period]. None denotes no periodicity\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, f data, recommend using Interpolator1D\n which caches the calculation of the derivatives and spline coefficients.\n\n \"\"\"\n xq, x, f = map(jnp.asarray, (xq, x, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n outshape = xq.shape + f.shape[1:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq = jnp.atleast_1d(xq)\n\n errorif(\n (len(x) != f.shape[axis]) or (jnp.ndim(x) != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_1D, ValueError, f\"unknown method {method}\")\n\n lowx, highx = _parse_extrap(extrap, 1)\n\n if period is not None:\n xq, x, f, fx = _make_periodic(xq, x, period, axis, f, fx)\n lowx = highx = True\n\n if method == \"nearest\":\n\n def derivative0():\n i = jnp.argmin(jnp.abs(xq[:, np.newaxis] - x[np.newaxis]), axis=1)\n return f[i]\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[1:]))\n\n fq = jax.lax.switch(derivative, [derivative0, derivative1])\n\n elif method == \"linear\":\n\n def derivative0():\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n df = jnp.take(f, i, axis) - jnp.take(f, i - 1, axis)\n dx = x[i] - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n delta = xq - x[i - 1]\n fq = jnp.where(\n (dx == 0),\n jnp.take(f, i, axis).T,\n jnp.take(f, i - 1, axis).T + (delta * dxi * df.T),\n ).T\n return fq\n\n def derivative1():\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n df = jnp.take(f, i, axis) - jnp.take(f, i - 1, axis)\n dx = x[i] - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n return (df.T * dxi).T\n\n def derivative2():\n return jnp.zeros((xq.size, *f.shape[1:]))\n\n fq = jax.lax.switch(derivative, [derivative0, derivative1, derivative2])\n\n elif method in (CUBIC_METHODS + (\"monotonic\", \"monotonic-0\")):\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n if fx is None:\n fx = approx_df(x, f, method, axis, **kwargs)\n assert fx.shape == f.shape\n\n dx = x[i] - x[i - 1]\n delta = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n t = delta * dxi\n\n f0 = jnp.take(f, i - 1, axis)\n f1 = jnp.take(f, i, axis)\n fx0 = (jnp.take(fx, i - 1, axis).T * dx).T\n fx1 = (jnp.take(fx, i, 
axis).T * dx).T\n\n F = jnp.stack([f0, f1, fx0, fx1], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_CUBIC, F).T\n ttx = _get_t_der(t, derivative, dxi)\n fq = jnp.einsum(\"ji...,ij->i...\", coef, ttx)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n return fq.reshape(outshape)" }, { "identifier": "interp2d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp2d( # noqa: C901 - FIXME: break this up into simpler pieces\n xq: jax.Array,\n yq: jax.Array,\n x: jax.Array,\n y: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 2d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n x query points where interpolation is desired\n yq : ndarray, shape(Nq,)\n y query points where interpolation is desired\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n derivative : int >= 0 or array-like, shape(2,)\n derivative order to calculate in x, y. Use a single value for the same in both\n directions.\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y directions. None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. 
Use a\n single value for the same in both directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, y, f data, recommend using\n Interpolator2D which caches the calculation of the derivatives and spline\n coefficients.\n\n \"\"\"\n xq, yq, x, y, f = map(jnp.asarray, (xq, yq, x, y, f))\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fxy = kwargs.pop(\"fxy\", None)\n xq, yq = jnp.broadcast_arrays(xq, yq)\n outshape = xq.shape + f.shape[2:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq, yq = map(jnp.atleast_1d, (xq, yq))\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_2D, ValueError, f\"unknown method {method}\")\n\n periodx, periody = _parse_ndarg(period, 2)\n derivative_x, derivative_y = _parse_ndarg(derivative, 2)\n lowx, highx, lowy, highy = _parse_extrap(extrap, 2)\n\n if periodx is not None:\n xq, x, f, fx, fy, fxy = _make_periodic(xq, x, periodx, 0, f, fx, fy, fxy)\n lowx = highx = True\n if periody is not None:\n yq, y, f, fx, fy, fxy = _make_periodic(yq, y, periody, 1, f, fx, fy, fxy)\n lowy = highy = True\n\n if method == \"nearest\":\n\n def derivative0():\n # because of the regular spaced grid we know that the nearest point\n # will be one of the 4 neighbors on the grid, so we first find those\n # and then take the nearest one among them.\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n neighbors_x = jnp.array(\n [[x[i], x[i - 1], x[i], x[i - 1]], [y[j], y[j], y[j - 1], y[j - 1]]]\n )\n neighbors_f = jnp.array(\n [f[i, j].T, f[i - 1, j].T, f[i, j - 1].T, f[i - 1, j - 1].T]\n )\n xyq = jnp.array([xq, yq])\n dist = jnp.linalg.norm(neighbors_x - xyq[:, None, :], axis=0)\n idx = jnp.argmin(dist, axis=0)\n return jax.vmap(lambda a, b: jnp.take(a, b, axis=-1))(neighbors_f.T, idx)\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[2:]))\n\n fq = jax.lax.cond(\n (derivative_x == 0) & (derivative_y == 0), derivative0, derivative1\n )\n\n elif method == \"linear\":\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n\n f00 = f[i - 1, j - 1]\n f01 = f[i - 1, j]\n f10 = f[i, j - 1]\n f11 = f[i, j]\n x0 = x[i - 1]\n x1 = x[i]\n y0 = y[j - 1]\n y1 = y[j]\n dx = x1 - x0\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n dy = y1 - y0\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n\n dx0 = lambda: jnp.array([x1 - xq, xq - x0])\n dx1 = lambda: jnp.array([-jnp.ones_like(xq), jnp.ones_like(xq)])\n dx2 = lambda: jnp.zeros((2, xq.size))\n dy0 = lambda: jnp.array([y1 - yq, yq - y0])\n dy1 = lambda: jnp.array([-jnp.ones_like(yq), jnp.ones_like(yq)])\n dy2 = lambda: jnp.zeros((2, yq.size))\n\n tx = jax.lax.switch(derivative_x, [dx0, dx1, dx2])\n ty = jax.lax.switch(derivative_y, [dy0, dy1, dy2])\n F = jnp.array([[f00, f01], [f10, f11]])\n fq = (dxi * dyi * jnp.einsum(\"ijk...,ik,jk->k...\", F, tx, ty).T).T\n\n elif method in CUBIC_METHODS:\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fxy is None:\n fxy = 
approx_df(y, fx, method, 1, **kwargs)\n assert fx.shape == fy.shape == fxy.shape == f.shape\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n\n dx = x[i] - x[i - 1]\n deltax = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n tx = deltax * dxi\n dy = y[j] - y[j - 1]\n deltay = yq - y[j - 1]\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n ty = deltay * dyi\n\n fs = OrderedDict()\n fs[\"f\"] = f\n fs[\"fx\"] = fx\n fs[\"fy\"] = fy\n fs[\"fxy\"] = fxy\n fsq = OrderedDict()\n for ff in fs.keys():\n for jj in [0, 1]:\n for ii in [0, 1]:\n s = ff + str(ii) + str(jj)\n fsq[s] = fs[ff][i - 1 + ii, j - 1 + jj]\n if \"x\" in ff:\n fsq[s] = (dx * fsq[s].T).T\n if \"y\" in ff:\n fsq[s] = (dy * fsq[s].T).T\n\n F = jnp.stack([foo for foo in fsq.values()], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_BICUBIC, F).T\n coef = jnp.moveaxis(coef.reshape((4, 4, *coef.shape[1:]), order=\"F\"), 2, 0)\n ttx = _get_t_der(tx, derivative_x, dxi)\n tty = _get_t_der(ty, derivative_y, dyi)\n fq = jnp.einsum(\"ijk...,ij,ik->i...\", coef, ttx, tty)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n fq = _extrap(yq, fq, y, lowy, highy)\n\n return fq.reshape(outshape)" }, { "identifier": "interp3d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp3d( # noqa: C901 - FIXME: break this up into simpler pieces\n xq: jax.Array,\n yq: jax.Array,\n zq: jax.Array,\n x: jax.Array,\n y: jax.Array,\n z: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 3d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n x query points where interpolation is desired\n yq : ndarray, shape(Nq,)\n y query points where interpolation is desired\n zq : ndarray, shape(Nq,)\n z query points where interpolation is desired\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n z : ndarray, shape(Nz,)\n z coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,Nz,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n derivative : int >= 0, array-like, shape(3,)\n derivative order to calculate in x,y,z directions. Use a single value for the\n same in all directions.\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions for\n [[xlow, xhigh],[ylow,yhigh],[zlow,zhigh]]\n period : float > 0, None, array-like, shape(3,)\n periodicity of the function in x, y, z directions. None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. 
Use a\n single value for the same in all directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, y, z, f data, recommend using\n Interpolator3D which caches the calculation of the derivatives and spline\n coefficients.\n\n \"\"\"\n xq, yq, zq, x, y, z, f = map(jnp.asarray, (xq, yq, zq, x, y, z, f))\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(\n (len(z) != f.shape[2]) or (z.ndim != 1),\n ValueError,\n \"z and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_3D, ValueError, f\"unknown method {method}\")\n\n xq, yq, zq = jnp.broadcast_arrays(xq, yq, zq)\n outshape = xq.shape + f.shape[3:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq, yq, zq = map(jnp.atleast_1d, (xq, yq, zq))\n\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fz = kwargs.pop(\"fz\", None)\n fxy = kwargs.pop(\"fxy\", None)\n fxz = kwargs.pop(\"fxz\", None)\n fyz = kwargs.pop(\"fyz\", None)\n fxyz = kwargs.pop(\"fxyz\", None)\n\n periodx, periody, periodz = _parse_ndarg(period, 3)\n derivative_x, derivative_y, derivative_z = _parse_ndarg(derivative, 3)\n lowx, highx, lowy, highy, lowz, highz = _parse_extrap(extrap, 3)\n\n if periodx is not None:\n xq, x, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n xq, x, periodx, 0, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowx = highx = True\n if periody is not None:\n yq, y, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n yq, y, periody, 1, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowy = highy = True\n if periodz is not None:\n zq, z, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n zq, z, periodz, 2, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowz = highz = True\n\n if method == \"nearest\":\n\n def derivative0():\n # because of the regular spaced grid we know that the nearest point\n # will be one of the 8 neighbors on the grid, so we first find those\n # and then take the nearest one among them.\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k = jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n neighbors_x = jnp.array(\n [\n [x[i], x[i - 1], x[i], x[i - 1], x[i], x[i - 1], x[i], x[i - 1]],\n [y[j], y[j], y[j - 1], y[j - 1], y[j], y[j], y[j - 1], y[j - 1]],\n [z[k], z[k], z[k], z[k], z[k - 1], z[k - 1], z[k - 1], z[k - 1]],\n ]\n )\n neighbors_f = jnp.array(\n [\n f[i, j, k].T,\n f[i - 1, j, k].T,\n f[i, j - 1, k].T,\n f[i - 1, j - 1, k].T,\n f[i, j, k - 1].T,\n f[i - 1, j, k - 1].T,\n f[i, j - 1, k - 1].T,\n f[i - 1, j - 1, k - 1].T,\n ]\n )\n xyzq = jnp.array([xq, yq, zq])\n dist = jnp.linalg.norm(neighbors_x - xyzq[:, None, :], axis=0)\n idx = jnp.argmin(dist, axis=0)\n return jax.vmap(lambda a, b: jnp.take(a, b, axis=-1))(neighbors_f.T, idx)\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[3:]))\n\n fq = jax.lax.cond(\n (derivative_x == 0) & (derivative_y == 0) & (derivative_z == 0),\n derivative0,\n derivative1,\n )\n\n elif method == \"linear\":\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k 
= jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n\n f000 = f[i - 1, j - 1, k - 1]\n f001 = f[i - 1, j - 1, k]\n f010 = f[i - 1, j, k - 1]\n f100 = f[i, j - 1, k - 1]\n f110 = f[i, j, k - 1]\n f011 = f[i - 1, j, k]\n f101 = f[i, j - 1, k]\n f111 = f[i, j, k]\n x0 = x[i - 1]\n x1 = x[i]\n y0 = y[j - 1]\n y1 = y[j]\n z0 = z[k - 1]\n z1 = z[k]\n dx = x1 - x0\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n dy = y1 - y0\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n dz = z1 - z0\n dzi = jnp.where(dz == 0, 0, 1 / dz)\n\n dx0 = lambda: jnp.array([x1 - xq, xq - x0])\n dx1 = lambda: jnp.array([-jnp.ones_like(xq), jnp.ones_like(xq)])\n dx2 = lambda: jnp.zeros((2, xq.size))\n dy0 = lambda: jnp.array([y1 - yq, yq - y0])\n dy1 = lambda: jnp.array([-jnp.ones_like(yq), jnp.ones_like(yq)])\n dy2 = lambda: jnp.zeros((2, yq.size))\n dz0 = lambda: jnp.array([z1 - zq, zq - z0])\n dz1 = lambda: jnp.array([-jnp.ones_like(zq), jnp.ones_like(zq)])\n dz2 = lambda: jnp.zeros((2, zq.size))\n\n tx = jax.lax.switch(derivative_x, [dx0, dx1, dx2])\n ty = jax.lax.switch(derivative_y, [dy0, dy1, dy2])\n tz = jax.lax.switch(derivative_z, [dz0, dz1, dz2])\n\n F = jnp.array([[[f000, f001], [f010, f011]], [[f100, f101], [f110, f111]]])\n fq = (dxi * dyi * dzi * jnp.einsum(\"lijk...,lk,ik,jk->k...\", F, tx, ty, tz).T).T\n\n elif method in CUBIC_METHODS:\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fz is None:\n fz = approx_df(z, f, method, 2, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n if fxz is None:\n fxz = approx_df(z, fx, method, 2, **kwargs)\n if fyz is None:\n fyz = approx_df(z, fy, method, 2, **kwargs)\n if fxyz is None:\n fxyz = approx_df(z, fxy, method, 2, **kwargs)\n assert (\n fx.shape\n == fy.shape\n == fz.shape\n == fxy.shape\n == fxz.shape\n == fyz.shape\n == fxyz.shape\n == f.shape\n )\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k = jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n\n dx = x[i] - x[i - 1]\n deltax = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n tx = deltax * dxi\n\n dy = y[j] - y[j - 1]\n deltay = yq - y[j - 1]\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n ty = deltay * dyi\n\n dz = z[k] - z[k - 1]\n deltaz = zq - z[k - 1]\n dzi = jnp.where(dz == 0, 0, 1 / dz)\n tz = deltaz * dzi\n\n fs = OrderedDict()\n fs[\"f\"] = f\n fs[\"fx\"] = fx\n fs[\"fy\"] = fy\n fs[\"fz\"] = fz\n fs[\"fxy\"] = fxy\n fs[\"fxz\"] = fxz\n fs[\"fyz\"] = fyz\n fs[\"fxyz\"] = fxyz\n fsq = OrderedDict()\n for ff in fs.keys():\n for kk in [0, 1]:\n for jj in [0, 1]:\n for ii in [0, 1]:\n s = ff + str(ii) + str(jj) + str(kk)\n fsq[s] = fs[ff][i - 1 + ii, j - 1 + jj, k - 1 + kk]\n if \"x\" in ff:\n fsq[s] = (dx * fsq[s].T).T\n if \"y\" in ff:\n fsq[s] = (dy * fsq[s].T).T\n if \"z\" in ff:\n fsq[s] = (dz * fsq[s].T).T\n\n F = jnp.stack([foo for foo in fsq.values()], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_TRICUBIC, F).T\n coef = jnp.moveaxis(coef.reshape((4, 4, 4, *coef.shape[1:]), order=\"F\"), 3, 0)\n ttx = _get_t_der(tx, derivative_x, dxi)\n tty = _get_t_der(ty, derivative_y, dyi)\n ttz = _get_t_der(tz, derivative_z, dzi)\n fq = jnp.einsum(\"lijk...,li,lj,lk->l...\", coef, ttx, tty, ttz)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n fq = _extrap(yq, fq, y, lowy, highy)\n fq = _extrap(zq, fq, z, lowz, highz)\n\n return fq.reshape(outshape)" } ]
import jax import jax.numpy as jnp import numpy as np import pytest from jax import config as jax_config from interpax import ( Interpolator1D, Interpolator2D, Interpolator3D, fft_interp1d, fft_interp2d, interp1d, interp2d, interp3d, )
16942
fq = interp(x, y, z, xp, yp, zp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y, z), rtol=1e-3, atol=1e-1) atol = 5.5e-3 rtol = 1e-5 fq = interp(x, y, z, xp, yp, zp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp3d_vector_valued(self): """Test for interpolating vector valued function.""" x = np.linspace(0, np.pi, 1000) y = np.linspace(0, 2 * np.pi, 1000) z = np.linspace(0, 3, 1000) xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.array( [np.sin(x) * np.cos(y) * z**2, 0.1 * (x + y - z)] ) fp = f(xxp.T, yyp.T, zzp.T).T fq = interp3d(x, y, z, xp, yp, zp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-2, atol=1) fq = interp3d(x, y, z, xp, yp, zp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-3, atol=1e-1) fq = interp3d(x, y, z, xp, yp, zp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-5, atol=5e-3) @pytest.mark.unit def test_fft_interp1d(): """Test for 1d Fourier interpolation.""" def fun(x): return 2 * np.sin(1 * x) + 4 * np.cos(3 * x) + 1 x = {"o": {}, "e": {}} x["o"][1] = np.linspace(0, 2 * np.pi, 33, endpoint=False) x["e"][1] = np.linspace(0, 2 * np.pi, 32, endpoint=False) x["o"][2] = np.linspace(0, 2 * np.pi, 133, endpoint=False) x["e"][2] = np.linspace(0, 2 * np.pi, 132, endpoint=False) f1 = {} for p in ["o", "e"]: f1[p] = {} for i in [1, 2]: f1[p][i] = fun(x[p][i]) for sp in ["o", "e"]: # source parity fi = f1[sp][1] fs = fun(x[sp][1] + 0.2) np.testing.assert_allclose( fs, fft_interp1d(fi, *fi.shape, sx=0.2, dx=np.diff(x[sp][1])[0]).squeeze() ) for ep in ["o", "e"]: # eval parity for s in ["up", "down"]: # up or downsample if s == "up": xs = 1 xe = 2 else: xs = 2 xe = 1 true = fun(x[ep][xe]) interp = fft_interp1d(f1[sp][xs], x[ep][xe].size) np.testing.assert_allclose(true, interp, atol=1e-12, rtol=1e-12) @pytest.mark.unit def test_fft_interp2d(): """Test for 2d Fourier interpolation.""" def fun2(x, y): return ( 2 * np.sin(1 * x[:, None]) - 1.2 * np.cos(2 * x[:, None]) + 3 * np.cos(3 * y[None]) - 2 * np.cos(5 * y[None]) + 1 ) x = {"o": {}, "e": {}} y = {"o": {}, "e": {}} x["o"][1] = np.linspace(0, 2 * np.pi, 33, endpoint=False) x["e"][1] = np.linspace(0, 2 * np.pi, 32, endpoint=False) x["o"][2] = np.linspace(0, 2 * np.pi, 133, endpoint=False) x["e"][2] = np.linspace(0, 2 * np.pi, 132, endpoint=False) y["o"][1] = np.linspace(0, 2 * np.pi, 33, endpoint=False) y["e"][1] = np.linspace(0, 2 * np.pi, 32, endpoint=False) y["o"][2] = np.linspace(0, 2 * np.pi, 133, endpoint=False) y["e"][2] = np.linspace(0, 2 * np.pi, 132, endpoint=False) f2 = {} for xp in ["o", "e"]: f2[xp] = {} for yp in ["o", "e"]: f2[xp][yp] = {} for i in [1, 2]: f2[xp][yp][i] = {} for j in [1, 2]: f2[xp][yp][i][j] = fun2(x[xp][i], y[yp][j]) for spx in ["o", "e"]: # source parity x for spy in ["o", "e"]: # source parity y fi = f2[spx][spy][1][1] fs = fun2(x[spx][1] + 0.2, y[spy][1] + 0.3) np.testing.assert_allclose( fs,
"""Tests for interpolation functions.""" jax_config.update("jax_enable_x64", True) class TestInterp1D: """Tests for interp1d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x", [ np.linspace(0, 2 * np.pi, 10000), 0.0, ], ) def test_interp1d(self, x): """Test accuracy of different 1d interpolation methods.""" xp = np.linspace(0, 2 * np.pi, 100) f = lambda x: np.sin(x) fp = f(xp) interp1 = lambda xq, *args, **kwargs: interp1d(xq, *args, **kwargs) interp2 = lambda xq, *args, **kwargs: Interpolator1D(*args, **kwargs)(xq) for interp in [interp1, interp2]: fq = interp(x, xp, fp, method="nearest") np.testing.assert_allclose(fq, f(x), rtol=1e-2, atol=1e-1) fq = interp(x, xp, fp, method="linear") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-3) fq = interp(x, xp, fp, method="cubic") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="monotonic") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-3) fq = interp(x, xp, fp, method="monotonic-0") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-2) @pytest.mark.unit def test_interp1d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 2 * np.pi, 100) x = np.linspace(0, 2 * np.pi, 300)[10:-10] f = lambda x: np.array([np.sin(x), np.cos(x)]) fp = f(xp).T fq = interp1d(x, xp, fp, method="nearest") np.testing.assert_allclose(fq, f(x).T, rtol=1e-2, atol=1e-1) fq = interp1d(x, xp, fp, method="linear") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-3) fq = interp1d(x, xp, fp, method="cubic") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="monotonic") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-3) fq = interp1d(x, xp, fp, method="monotonic-0") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-2) @pytest.mark.unit def test_interp1d_extrap_periodic(self): """Test extrapolation and periodic BC of 1d interpolation.""" xp = np.linspace(0, 2 * np.pi, 200) x = np.linspace(-1, 2 * np.pi + 1, 10000) f = lambda x: np.sin(x) fp = f(xp) fq = interp1d(x, xp, fp, method="cubic", extrap=False) assert np.isnan(fq[0]) assert np.isnan(fq[-1]) fq = interp1d(x, xp, fp, method="cubic", extrap=True) assert not np.isnan(fq[0]) assert not np.isnan(fq[-1]) fq = interp1d(x, xp, fp, method="cubic", period=2 * np.pi) np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-2) @pytest.mark.unit def test_interp1d_monotonic(self): """Ensure monotonic interpolation is actually monotonic.""" # true function is just linear with a jump discontinuity at x=1.5 x = np.linspace(-4, 5, 10) f = np.heaviside(x - 1.5, 0) + 0.1 * x xq = np.linspace(-4, 5, 1000) dfc = interp1d(xq, x, f, derivative=1, method="cubic") dfm = interp1d(xq, x, f, derivative=1, method="monotonic") dfm0 = interp1d(xq, x, f, derivative=1, method="monotonic-0") assert dfc.min() < 0 # cubic interpolation undershoots, giving negative slope 
assert dfm.min() > 0 # monotonic interpolation doesn't assert dfm0.min() >= 0 # monotonic-0 doesn't overshoot either # ensure monotonic-0 has 0 slope at end points np.testing.assert_allclose(dfm0[np.array([0, -1])], 0, atol=1e-12) class TestInterp2D: """Tests for interp2d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y", [ (np.linspace(0, 3 * np.pi, 1000), np.linspace(0, 2 * np.pi, 1000)), (0.0, 0.0), ], ) def test_interp2d(self, x, y): """Test accuracy of different 2d interpolation methods.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.sin(x) * np.cos(y) fp = f(xxp, yyp) interp1 = lambda xq, yq, *args, **kwargs: interp2d(xq, yq, *args, **kwargs) interp2 = lambda xq, yq, *args, **kwargs: Interpolator2D(*args, **kwargs)( xq, yq ) for interp in [interp1, interp2]: fq = interp( x, y, xp, yp, fp, method="nearest", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-2, atol=1) fq = interp( x, y, xp, yp, fp, method="linear", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-4, atol=1e-2) atol = 2e-3 rtol = 1e-5 fq = interp(x, y, xp, yp, fp, method="cubic", period=(2 * np.pi, 2 * np.pi)) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cubic2", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="catmull-rom", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cardinal", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp2d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) x = np.linspace(0, 3 * np.pi, 200) y = np.linspace(0, 2 * np.pi, 200) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.array([np.sin(x) * np.cos(y), np.sin(x) + np.cos(y)]) fp = f(xxp.T, yyp.T).T fq = interp2d(x, y, xp, yp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-2, atol=1.2e-1) fq = interp2d(x, y, xp, yp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-3, atol=1e-2) fq = interp2d(x, y, xp, yp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-5, atol=2e-3) class TestInterp3D: """Tests for interp3d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y, z", [ ( np.linspace(0, np.pi, 1000), np.linspace(0, 2 * np.pi, 1000), np.linspace(0, 3, 1000), ), (0.0, 0.0, 0.0), ], ) def test_interp3d(self, x, y, z): """Test accuracy of different 3d interpolation methods.""" xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.sin(x) * np.cos(y) * z**2 fp = f(xxp, yyp, zzp) interp1 = lambda xq, yq, zq, *args, **kwargs: interp3d( xq, yq, zq, *args, **kwargs ) interp2 = lambda xq, yq, zq, *args, **kwargs: Interpolator3D(*args, **kwargs)( xq, yq, zq ) for interp in [interp1, interp2]: fq = interp(x, y, z, xp, yp, zp, fp) np.testing.assert_allclose(fq, f(x, y, z), rtol=1e-5, atol=1e-2) fq = interp(x, y, z, xp, yp, zp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y, z), rtol=1e-2, atol=1) fq = interp(x, y, z, xp, yp, zp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y, 
z), rtol=1e-3, atol=1e-1) atol = 5.5e-3 rtol = 1e-5 fq = interp(x, y, z, xp, yp, zp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp3d_vector_valued(self): """Test for interpolating vector valued function.""" x = np.linspace(0, np.pi, 1000) y = np.linspace(0, 2 * np.pi, 1000) z = np.linspace(0, 3, 1000) xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.array( [np.sin(x) * np.cos(y) * z**2, 0.1 * (x + y - z)] ) fp = f(xxp.T, yyp.T, zzp.T).T fq = interp3d(x, y, z, xp, yp, zp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-2, atol=1) fq = interp3d(x, y, z, xp, yp, zp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-3, atol=1e-1) fq = interp3d(x, y, z, xp, yp, zp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-5, atol=5e-3) @pytest.mark.unit def test_fft_interp1d(): """Test for 1d Fourier interpolation.""" def fun(x): return 2 * np.sin(1 * x) + 4 * np.cos(3 * x) + 1 x = {"o": {}, "e": {}} x["o"][1] = np.linspace(0, 2 * np.pi, 33, endpoint=False) x["e"][1] = np.linspace(0, 2 * np.pi, 32, endpoint=False) x["o"][2] = np.linspace(0, 2 * np.pi, 133, endpoint=False) x["e"][2] = np.linspace(0, 2 * np.pi, 132, endpoint=False) f1 = {} for p in ["o", "e"]: f1[p] = {} for i in [1, 2]: f1[p][i] = fun(x[p][i]) for sp in ["o", "e"]: # source parity fi = f1[sp][1] fs = fun(x[sp][1] + 0.2) np.testing.assert_allclose( fs, fft_interp1d(fi, *fi.shape, sx=0.2, dx=np.diff(x[sp][1])[0]).squeeze() ) for ep in ["o", "e"]: # eval parity for s in ["up", "down"]: # up or downsample if s == "up": xs = 1 xe = 2 else: xs = 2 xe = 1 true = fun(x[ep][xe]) interp = fft_interp1d(f1[sp][xs], x[ep][xe].size) np.testing.assert_allclose(true, interp, atol=1e-12, rtol=1e-12) @pytest.mark.unit def test_fft_interp2d(): """Test for 2d Fourier interpolation.""" def fun2(x, y): return ( 2 * np.sin(1 * x[:, None]) - 1.2 * np.cos(2 * x[:, None]) + 3 * np.cos(3 * y[None]) - 2 * np.cos(5 * y[None]) + 1 ) x = {"o": {}, "e": {}} y = {"o": {}, "e": {}} x["o"][1] = np.linspace(0, 2 * np.pi, 33, endpoint=False) x["e"][1] = np.linspace(0, 2 * np.pi, 32, endpoint=False) x["o"][2] = np.linspace(0, 2 * np.pi, 133, endpoint=False) x["e"][2] = np.linspace(0, 2 * np.pi, 132, endpoint=False) y["o"][1] = np.linspace(0, 2 * np.pi, 33, endpoint=False) y["e"][1] = np.linspace(0, 2 * np.pi, 32, endpoint=False) y["o"][2] = np.linspace(0, 2 * np.pi, 133, endpoint=False) y["e"][2] = np.linspace(0, 2 * np.pi, 132, endpoint=False) f2 = {} for xp in ["o", "e"]: f2[xp] = {} for yp in ["o", "e"]: f2[xp][yp] = {} for i in [1, 2]: f2[xp][yp][i] = {} for j in [1, 2]: f2[xp][yp][i][j] = fun2(x[xp][i], y[yp][j]) for spx in ["o", "e"]: # source parity x for spy in ["o", "e"]: # source parity y fi = f2[spx][spy][1][1] fs = fun2(x[spx][1] + 0.2, y[spy][1] + 0.3) np.testing.assert_allclose( fs,
fft_interp2d(
1
2023-10-18 13:12:20+00:00
24k
city96/ComfyUI_ExtraModels
PixArt/sampler.py
[ { "identifier": "gaussian_diffusion", "path": "PixArt/sampling/gaussian_diffusion.py", "snippet": "def mean_flat(tensor):\n def is_vb(self):\ndef _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):\ndef get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):\ndef get_named_beta_schedule(schedule_name, num_diffusion_timesteps):\ndef betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):\n def __init__(\n self,\n *,\n betas,\n model_mean_type,\n model_var_type,\n loss_type,\n snr=False\n ):\n def q_mean_variance(self, x_start, t):\n def q_sample(self, x_start, t, noise=None):\n def q_posterior_mean_variance(self, x_start, x_t, t):\n def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):\n def process_xstart(x):\n def _predict_xstart_from_eps(self, x_t, t, eps):\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n ):\n def p_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n ):\n def p_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n ):\n def ddim_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n def ddim_reverse_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n def ddim_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n ):\n def ddim_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n ):\n def _vb_terms_bpd(\n self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None\n ):\n def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):\n def _prior_bpd(self, x_start):\n def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):\ndef _extract_into_tensor(arr, timesteps, broadcast_shape):\nclass ModelMeanType(enum.Enum):\nclass ModelVarType(enum.Enum):\nclass LossType(enum.Enum):\nclass GaussianDiffusion:\n PREVIOUS_X = enum.auto() # the model predicts x_{t-1}\n START_X = enum.auto() # the model predicts x_0\n EPSILON = enum.auto() # the model predicts epsilon\n LEARNED = enum.auto()\n FIXED_SMALL = enum.auto()\n FIXED_LARGE = enum.auto()\n LEARNED_RANGE = enum.auto()\n MSE = enum.auto() # use raw MSE loss (and KL when learning variances)\n RESCALED_MSE = (\n enum.auto()\n ) # use raw MSE loss (with RESCALED_KL when learning variances)\n KL = enum.auto() # use the variational lower-bound\n RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB\n B, C = x.shape[:2]\n B, C = x_t.shape[:2]" }, { "identifier": "model_wrapper", "path": "PixArt/sampling/dpm_solver.py", "snippet": "def model_wrapper(\n model,\n noise_schedule,\n model_type=\"noise\",\n model_kwargs={},\n 
guidance_type=\"uncond\",\n condition=None,\n unconditional_condition=None,\n guidance_scale=1.,\n classifier_fn=None,\n classifier_kwargs={},\n):\n \"\"\"Create a wrapper function for the noise prediction model.\n\n DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to\n firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.\n\n We support four types of the diffusion model by setting `model_type`:\n\n 1. \"noise\": noise prediction model. (Trained by predicting noise).\n\n 2. \"x_start\": data prediction model. (Trained by predicting the data x_0 at time 0).\n\n 3. \"v\": velocity prediction model. (Trained by predicting the velocity).\n The \"v\" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].\n\n [1] Salimans, Tim, and Jonathan Ho. \"Progressive distillation for fast sampling of diffusion models.\"\n arXiv preprint arXiv:2202.00512 (2022).\n [2] Ho, Jonathan, et al. \"Imagen Video: High Definition Video Generation with Diffusion Models.\"\n arXiv preprint arXiv:2210.02303 (2022).\n\n 4. \"score\": marginal score function. (Trained by denoising score matching).\n Note that the score function and the noise prediction model follows a simple relationship:\n ```\n noise(x_t, t) = -sigma_t * score(x_t, t)\n ```\n\n We support three types of guided sampling by DPMs by setting `guidance_type`:\n 1. \"uncond\": unconditional sampling by DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n 2. \"classifier\": classifier guidance sampling [3] by DPMs and another classifier.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n The input `classifier_fn` has the following format:\n ``\n classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)\n ``\n\n [3] P. Dhariwal and A. Q. Nichol, \"Diffusion models beat GANs on image synthesis,\"\n in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.\n\n 3. \"classifier-free\": classifier-free guidance sampling by conditional DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score\n ``\n And if cond == `unconditional_condition`, the model output is the unconditional DPM output.\n\n [4] Ho, Jonathan, and Tim Salimans. \"Classifier-free diffusion guidance.\"\n arXiv preprint arXiv:2207.12598 (2022).\n\n\n The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)\n or continuous-time labels (i.e. epsilon to T).\n\n We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:\n ``\n def model_fn(x, t_continuous) -> noise:\n t_input = get_model_input_time(t_continuous)\n return noise_pred(model, x, t_input, **model_kwargs)\n ``\n where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.\n\n ===============================================================\n\n Args:\n model: A diffusion model with the corresponding format described above.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n model_type: A `str`. The parameterization type of the diffusion model.\n \"noise\" or \"x_start\" or \"v\" or \"score\".\n model_kwargs: A `dict`. 
A dict for the other inputs of the model function.\n guidance_type: A `str`. The type of the guidance for sampling.\n \"uncond\" or \"classifier\" or \"classifier-free\".\n condition: A pytorch tensor. The condition for the guided sampling.\n Only used for \"classifier\" or \"classifier-free\" guidance type.\n unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.\n Only used for \"classifier-free\" guidance type.\n guidance_scale: A `float`. The scale for the guided sampling.\n classifier_fn: A classifier function. Only used for the classifier guidance.\n classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.\n Returns:\n A noise prediction model that accepts the noised data and the continuous time as the inputs.\n \"\"\"\n\n def get_model_input_time(t_continuous):\n \"\"\"\n Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.\n For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].\n For continuous-time DPMs, we just use `t_continuous`.\n \"\"\"\n if noise_schedule.schedule == 'discrete':\n return (t_continuous - 1. / noise_schedule.total_N) * 1000.\n else:\n return t_continuous\n\n def noise_pred_fn(x, t_continuous, cond=None):\n t_input = get_model_input_time(t_continuous)\n if cond is None:\n output = model(\n x = x,\n timesteps = t_input,\n context = None,\n y = None,\n **model_kwargs\n )\n else:\n output = model(\n x = x,\n timesteps = t_input,\n context = cond,\n y = None,\n **model_kwargs\n )\n if model_type == \"noise\":\n return output\n elif model_type == \"x_start\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)\n return (x - expand_dims(alpha_t, x.dim()) * output) / expand_dims(sigma_t, x.dim())\n elif model_type == \"v\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)\n return expand_dims(alpha_t, x.dim()) * output + expand_dims(sigma_t, x.dim()) * x\n elif model_type == \"score\":\n sigma_t = noise_schedule.marginal_std(t_continuous)\n return -expand_dims(sigma_t, x.dim()) * output\n\n def cond_grad_fn(x, t_input):\n \"\"\"\n Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).\n \"\"\"\n with torch.enable_grad():\n x_in = x.detach().requires_grad_(True)\n log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)\n return torch.autograd.grad(log_prob.sum(), x_in)[0]\n\n def model_fn(x, t_continuous):\n \"\"\"\n The noise predicition model function that is used for DPM-Solver.\n \"\"\"\n if guidance_type == \"uncond\":\n return noise_pred_fn(x, t_continuous)\n elif guidance_type == \"classifier\":\n assert classifier_fn is not None\n t_input = get_model_input_time(t_continuous)\n cond_grad = cond_grad_fn(x, t_input)\n sigma_t = noise_schedule.marginal_std(t_continuous)\n noise = noise_pred_fn(x, t_continuous)\n return noise - guidance_scale * expand_dims(sigma_t, x.dim()) * cond_grad\n elif guidance_type == \"classifier-free\":\n if guidance_scale == 1. 
or unconditional_condition is None:\n return noise_pred_fn(x, t_continuous, cond=condition)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t_continuous] * 2)\n c_in = torch.cat([unconditional_condition, condition])\n noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)\n return noise_uncond + guidance_scale * (noise - noise_uncond)\n\n assert model_type in [\"noise\", \"x_start\", \"v\", \"score\"]\n assert guidance_type in [\"uncond\", \"classifier\", \"classifier-free\"]\n return model_fn" }, { "identifier": "DPM_Solver", "path": "PixArt/sampling/dpm_solver.py", "snippet": "class DPM_Solver:\n def __init__(\n self,\n model_fn,\n noise_schedule,\n algorithm_type=\"dpmsolver++\",\n correcting_x0_fn=None,\n correcting_xt_fn=None,\n thresholding_max_val=1.,\n dynamic_thresholding_ratio=0.995,\n ):\n \"\"\"Construct a DPM-Solver.\n\n We support both DPM-Solver (`algorithm_type=\"dpmsolver\"`) and DPM-Solver++ (`algorithm_type=\"dpmsolver++\"`).\n\n We also support the \"dynamic thresholding\" method in Imagen[1]. For pixel-space diffusion models, you\n can set both `algorithm_type=\"dpmsolver++\"` and `correcting_x0_fn=\"dynamic_thresholding\"` to use the\n dynamic thresholding. The \"dynamic thresholding\" can greatly improve the sample quality for pixel-space\n DPMs with large guidance scales. Note that the thresholding method is **unsuitable** for latent-space\n DPMs (such as stable-diffusion).\n\n To support advanced algorithms in image-to-image applications, we also support corrector functions for\n both x0 and xt.\n\n Args:\n model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):\n ``\n def model_fn(x, t_continuous):\n return noise\n ``\n The shape of `x` is `(batch_size, **shape)`, and the shape of `t_continuous` is `(batch_size,)`.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n algorithm_type: A `str`. Either \"dpmsolver\" or \"dpmsolver++\".\n correcting_x0_fn: A `str` or a function with the following format:\n ```\n def correcting_x0_fn(x0, t):\n x0_new = ...\n return x0_new\n ```\n This function is to correct the outputs of the data prediction model at each sampling step. e.g.,\n ```\n x0_pred = data_pred_model(xt, t)\n if correcting_x0_fn is not None:\n x0_pred = correcting_x0_fn(x0_pred, t)\n xt_1 = update(x0_pred, xt, t)\n ```\n If `correcting_x0_fn=\"dynamic_thresholding\"`, we use the dynamic thresholding proposed in Imagen[1].\n correcting_xt_fn: A function with the following format:\n ```\n def correcting_xt_fn(xt, t, step):\n x_new = ...\n return x_new\n ```\n This function is to correct the intermediate samples xt at each sampling step. e.g.,\n ```\n xt = ...\n xt = correcting_xt_fn(xt, t, step)\n ```\n thresholding_max_val: A `float`. The max value for thresholding.\n Valid only when use `dpmsolver++` and `correcting_x0_fn=\"dynamic_thresholding\"`.\n dynamic_thresholding_ratio: A `float`. The ratio for dynamic thresholding (see Imagen[1] for details).\n Valid only when use `dpmsolver++` and `correcting_x0_fn=\"dynamic_thresholding\"`.\n\n [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour,\n Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models\n with deep language understanding. 
arXiv preprint arXiv:2205.11487, 2022b.\n \"\"\"\n self.model = lambda x, t: model_fn(x, t.expand((x.shape[0])))\n self.noise_schedule = noise_schedule\n assert algorithm_type in [\"dpmsolver\", \"dpmsolver++\"]\n self.algorithm_type = algorithm_type\n if correcting_x0_fn == \"dynamic_thresholding\":\n self.correcting_x0_fn = self.dynamic_thresholding_fn\n else:\n self.correcting_x0_fn = correcting_x0_fn\n self.correcting_xt_fn = correcting_xt_fn\n self.dynamic_thresholding_ratio = dynamic_thresholding_ratio\n self.thresholding_max_val = thresholding_max_val\n\n def dynamic_thresholding_fn(self, x0, t):\n \"\"\"\n The dynamic thresholding method.\n \"\"\"\n dims = x0.dim()\n p = self.dynamic_thresholding_ratio\n s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)\n s = expand_dims(torch.maximum(s, self.thresholding_max_val * torch.ones_like(s).to(s.device)), dims)\n x0 = torch.clamp(x0, -s, s) / s\n return x0\n\n def noise_prediction_fn(self, x, t):\n \"\"\"\n Return the noise prediction model.\n \"\"\"\n return self.model(x, t)\n\n def data_prediction_fn(self, x, t):\n \"\"\"\n Return the data prediction model (with corrector).\n \"\"\"\n noise = self.noise_prediction_fn(x, t)\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)\n x0 = (x - sigma_t * noise) / alpha_t\n if self.correcting_x0_fn is not None:\n x0 = self.correcting_x0_fn(x0, t)\n return x0\n\n def model_fn(self, x, t):\n \"\"\"\n Convert the model to the noise prediction model or the data prediction model.\n \"\"\"\n if self.algorithm_type == \"dpmsolver++\":\n return self.data_prediction_fn(x, t)\n else:\n return self.noise_prediction_fn(x, t)\n\n def get_time_steps(self, skip_type, t_T, t_0, N, device):\n \"\"\"Compute the intermediate time steps for sampling.\n\n Args:\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n N: A `int`. The total number of the spacing of the time steps.\n device: A torch device.\n Returns:\n A pytorch tensor of the time steps, with the shape (N + 1,).\n \"\"\"\n if skip_type == 'logSNR':\n lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))\n lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))\n logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)\n return self.noise_schedule.inverse_lambda(logSNR_steps)\n elif skip_type == 'time_uniform':\n return torch.linspace(t_T, t_0, N + 1).to(device)\n elif skip_type == 'time_quadratic':\n t_order = 2\n t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. 
/ t_order), N + 1).pow(t_order).to(device)\n return t\n else:\n raise ValueError(\n \"Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'\".format(skip_type))\n\n def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):\n \"\"\"\n Get the order of each step for sampling by the singlestep DPM-Solver.\n\n We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as \"DPM-Solver-fast\".\n Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:\n - If order == 1:\n We take `steps` of DPM-Solver-1 (i.e. DDIM).\n - If order == 2:\n - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of DPM-Solver-2.\n - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If order == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.\n\n ============================================\n Args:\n order: A `int`. The max order for the solver (2 or 3).\n steps: A `int`. The total number of function evaluations (NFE).\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n device: A torch device.\n Returns:\n orders: A list of the solver order of each step.\n \"\"\"\n if order == 3:\n K = steps // 3 + 1\n if steps % 3 == 0:\n orders = [3, ] * (K - 2) + [2, 1]\n elif steps % 3 == 1:\n orders = [3, ] * (K - 1) + [1]\n else:\n orders = [3, ] * (K - 1) + [2]\n elif order == 2:\n if steps % 2 == 0:\n K = steps // 2\n orders = [2, ] * K\n else:\n K = steps // 2 + 1\n orders = [2, ] * (K - 1) + [1]\n elif order == 1:\n K = 1\n orders = [1, ] * steps\n else:\n raise ValueError(\"'order' must be '1' or '2' or '3'.\")\n if skip_type == 'logSNR':\n # To reproduce the results in DPM-Solver paper\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)\n else:\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[\n torch.cumsum(torch.tensor([0, ] + orders), 0).to(device)]\n return timesteps_outer, orders\n\n def denoise_to_zero_fn(self, x, s):\n \"\"\"\n Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.\n \"\"\"\n return self.data_prediction_fn(x, s)\n\n def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):\n \"\"\"\n DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n model_s: A pytorch tensor. 
The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n dims = x.dim()\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = (\n sigma_t / sigma_s * x\n - alpha_t * phi_1 * model_s\n )\n if return_intermediate:\n return x_t, {'model_s': model_s}\n else:\n return x_t\n else:\n phi_1 = torch.expm1(h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n )\n if return_intermediate:\n return x_t, {'model_s': model_s}\n else:\n return x_t\n\n def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,\n solver_type='dpmsolver'):\n \"\"\"\n Singlestep solver DPM-Solver-2 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n r1: A `float`. The hyperparameter of the second-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpmsolver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(solver_type))\n if r1 is None:\n r1 = 0.5\n ns = self.noise_schedule\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n s1 = ns.inverse_lambda(lambda_s1)\n log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(\n s1), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)\n alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_11 = torch.expm1(-r1 * h)\n phi_1 = torch.expm1(-h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (\n (sigma_s1 / sigma_s) * x\n - (alpha_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == 'dpmsolver':\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n - (0.5 / r1) * (alpha_t * phi_1) * (model_s1 - model_s)\n )\n elif solver_type == 'taylor':\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (1. 
/ r1) * (alpha_t * (phi_1 / h + 1.)) * (model_s1 - model_s)\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_1 = torch.expm1(h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (\n torch.exp(log_alpha_s1 - log_alpha_s) * x\n - (sigma_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == 'dpmsolver':\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n - (0.5 / r1) * (sigma_t * phi_1) * (model_s1 - model_s)\n )\n elif solver_type == 'taylor':\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n - (1. / r1) * (sigma_t * (phi_1 / h - 1.)) * (model_s1 - model_s)\n )\n if return_intermediate:\n return x_t, {'model_s': model_s, 'model_s1': model_s1}\n else:\n return x_t\n\n def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,\n return_intermediate=False, solver_type='dpmsolver'):\n \"\"\"\n Singlestep solver DPM-Solver-3 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n r1: A `float`. The hyperparameter of the third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).\n If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpmsolver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(solver_type))\n if r1 is None:\n r1 = 1. / 3.\n if r2 is None:\n r2 = 2. 
/ 3.\n ns = self.noise_schedule\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n lambda_s2 = lambda_s + r2 * h\n s1 = ns.inverse_lambda(lambda_s1)\n s2 = ns.inverse_lambda(lambda_s2)\n log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(\n s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(\n s2), ns.marginal_std(t)\n alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_11 = torch.expm1(-r1 * h)\n phi_12 = torch.expm1(-r2 * h)\n phi_1 = torch.expm1(-h)\n phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.\n phi_2 = phi_1 / h + 1.\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (\n (sigma_s1 / sigma_s) * x\n - (alpha_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n (sigma_s2 / sigma_s) * x\n - (alpha_s2 * phi_12) * model_s\n + r2 / r1 * (alpha_s2 * phi_22) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == 'dpmsolver':\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (1. / r2) * (alpha_t * phi_2) * (model_s2 - model_s)\n )\n elif solver_type == 'taylor':\n D1_0 = (1. / r1) * (model_s1 - model_s)\n D1_1 = (1. / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2. * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (alpha_t * phi_2) * D1\n - (alpha_t * phi_3) * D2\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_12 = torch.expm1(r2 * h)\n phi_1 = torch.expm1(h)\n phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.\n phi_2 = phi_1 / h - 1.\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (\n (torch.exp(log_alpha_s1 - log_alpha_s)) * x\n - (sigma_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n (torch.exp(log_alpha_s2 - log_alpha_s)) * x\n - (sigma_s2 * phi_12) * model_s\n - r2 / r1 * (sigma_s2 * phi_22) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == 'dpmsolver':\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_s)) * x\n - (sigma_t * phi_1) * model_s\n - (1. / r2) * (sigma_t * phi_2) * (model_s2 - model_s)\n )\n elif solver_type == 'taylor':\n D1_0 = (1. / r1) * (model_s1 - model_s)\n D1_1 = (1. / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2. * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_s)) * x\n - (sigma_t * phi_1) * model_s\n - (sigma_t * phi_2) * D1\n - (sigma_t * phi_3) * D2\n )\n\n if return_intermediate:\n return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}\n else:\n return x_t\n\n def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type=\"dpmsolver\"):\n \"\"\"\n Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n solver_type: either 'dpmsolver' or 'taylor'. 
The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpmsolver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(solver_type))\n ns = self.noise_schedule\n model_prev_1, model_prev_0 = model_prev_list[-2], model_prev_list[-1]\n t_prev_1, t_prev_0 = t_prev_list[-2], t_prev_list[-1]\n lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(\n t_prev_0), ns.marginal_lambda(t)\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0 = h_0 / h\n D1_0 = (1. / r0) * (model_prev_0 - model_prev_1)\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n if solver_type == 'dpmsolver':\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n - 0.5 * (alpha_t * phi_1) * D1_0\n )\n elif solver_type == 'taylor':\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n + (alpha_t * (phi_1 / h + 1.)) * D1_0\n )\n else:\n phi_1 = torch.expm1(h)\n if solver_type == 'dpmsolver':\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - 0.5 * (sigma_t * phi_1) * D1_0\n )\n elif solver_type == 'taylor':\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - (sigma_t * (phi_1 / h - 1.)) * D1_0\n )\n return x_t\n\n def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpmsolver'):\n \"\"\"\n Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n model_prev_2, model_prev_1, model_prev_0 = model_prev_list\n t_prev_2, t_prev_1, t_prev_0 = t_prev_list\n lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(\n t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_1 = lambda_prev_1 - lambda_prev_2\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0, r1 = h_0 / h, h_1 / h\n D1_0 = (1. / r0) * (model_prev_0 - model_prev_1)\n D1_1 = (1. / r1) * (model_prev_1 - model_prev_2)\n D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)\n D2 = (1. 
/ (r0 + r1)) * (D1_0 - D1_1)\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n phi_2 = phi_1 / h + 1.\n phi_3 = phi_2 / h - 0.5\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n + (alpha_t * phi_2) * D1\n - (alpha_t * phi_3) * D2\n )\n else:\n phi_1 = torch.expm1(h)\n phi_2 = phi_1 / h - 1.\n phi_3 = phi_2 / h - 0.5\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - (sigma_t * phi_2) * D1\n - (sigma_t * phi_3) * D2\n )\n return x_t\n\n def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpmsolver', r1=None,\n r2=None):\n \"\"\"\n Singlestep DPM-Solver with the order `order` from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n r1: A `float`. The hyperparameter of the second-order or third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)\n elif order == 2:\n return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,\n solver_type=solver_type, r1=r1)\n elif order == 3:\n return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,\n solver_type=solver_type, r1=r1, r2=r2)\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpmsolver'):\n \"\"\"\n Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. 
The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])\n elif order == 2:\n return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)\n elif order == 3:\n return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,\n solver_type='dpmsolver'):\n \"\"\"\n The adaptive step size solver based on singlestep DPM-Solver.\n\n Args:\n x: A pytorch tensor. The initial value at time `t_T`.\n order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n h_init: A `float`. The initial step size (for logSNR).\n atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].\n rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.\n theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].\n t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the\n current time and `t_0` is less than `t_err`. The default setting is 1e-5.\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_0: A pytorch tensor. The approximated solution at time `t_0`.\n\n [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, \"Gotta go fast when generating data with score-based models,\" arXiv preprint arXiv:2105.14080, 2021.\n \"\"\"\n ns = self.noise_schedule\n s = t_T * torch.ones((1,)).to(x)\n lambda_s = ns.marginal_lambda(s)\n lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))\n h = h_init * torch.ones_like(s).to(x)\n x_prev = x\n nfe = 0\n if order == 2:\n r1 = 0.5\n lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)\n higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,\n solver_type=solver_type,\n **kwargs)\n elif order == 3:\n r1, r2 = 1. / 3., 2. 
/ 3.\n lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,\n return_intermediate=True,\n solver_type=solver_type)\n higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,\n solver_type=solver_type,\n **kwargs)\n else:\n raise ValueError(\"For adaptive step size solver, order must be 2 or 3, got {}\".format(order))\n while torch.abs((s - t_0)).mean() > t_err:\n t = ns.inverse_lambda(lambda_s + h)\n x_lower, lower_noise_kwargs = lower_update(x, s, t)\n x_higher = higher_update(x, s, t, **lower_noise_kwargs)\n delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))\n norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))\n E = norm_fn((x_higher - x_lower) / delta).max()\n if torch.all(E <= 1.):\n x = x_higher\n s = t\n x_prev = x_lower\n lambda_s = ns.marginal_lambda(s)\n h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)\n nfe += order\n print('adaptive solver nfe', nfe)\n return x\n\n def add_noise(self, x, t, noise=None):\n \"\"\"\n Compute the noised input xt = alpha_t * x + sigma_t * noise.\n\n Args:\n x: A `torch.Tensor` with shape `(batch_size, *shape)`.\n t: A `torch.Tensor` with shape `(t_size,)`.\n Returns:\n xt with shape `(t_size, batch_size, *shape)`.\n \"\"\"\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)\n if noise is None:\n noise = torch.randn((t.shape[0], *x.shape), device=x.device)\n x = x.reshape((-1, *x.shape))\n xt = expand_dims(alpha_t, x.dim()) * x + expand_dims(sigma_t, x.dim()) * noise\n if t.shape[0] == 1:\n return xt.squeeze(0)\n else:\n return xt\n\n def inverse(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform',\n method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver',\n atol=0.0078, rtol=0.05, return_intermediate=False,\n ):\n \"\"\"\n Inverse the sample `x` from time `t_start` to `t_end` by DPM-Solver.\n For discrete-time DPMs, we use `t_start=1/N`, where `N` is the total time steps during training.\n \"\"\"\n t_0 = 1. / self.noise_schedule.total_N if t_start is None else t_start\n t_T = self.noise_schedule.T if t_end is None else t_end\n assert t_0 > 0 and t_T > 0, \"Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array\"\n return self.sample(x, steps=steps, t_start=t_0, t_end=t_T, order=order, skip_type=skip_type,\n method=method, lower_order_final=lower_order_final, denoise_to_zero=denoise_to_zero,\n solver_type=solver_type,\n atol=atol, rtol=rtol, return_intermediate=return_intermediate)\n\n def sample(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform',\n method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver',\n atol=0.0078, rtol=0.05, return_intermediate=False, latent_scale_factor=1.0, pbar=None, previewer=None,\n ):\n \"\"\"\n Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.\n\n =====================================================\n\n We support the following algorithms for both noise prediction model and data prediction model:\n - 'singlestep':\n Singlestep DPM-Solver (i.e. 
\"DPM-Solver-fast\" in the paper), which combines different orders of singlestep DPM-Solver.\n We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).\n The total number of function evaluations (NFE) == `steps`.\n Given a fixed NFE == `steps`, the sampling procedure is:\n - If `order` == 1:\n - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.\n - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If `order` == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.\n - 'multistep':\n Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.\n We initialize the first `order` values by lower order multistep solvers.\n Given a fixed NFE == `steps`, the sampling procedure is:\n Denote K = steps.\n - If `order` == 1:\n - We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.\n - If `order` == 3:\n - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.\n - 'singlestep_fixed':\n Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).\n We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.\n - 'adaptive':\n Adaptive step size DPM-Solver (i.e. 
\"DPM-Solver-12\" and \"DPM-Solver-23\" in the paper).\n We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.\n You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs\n (NFE) and the sample quality.\n - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.\n - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.\n\n =====================================================\n\n Some advices for choosing the algorithm:\n - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:\n Use singlestep DPM-Solver or DPM-Solver++ (\"DPM-Solver-fast\" in the paper) with `order = 3`.\n e.g., DPM-Solver:\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n e.g., DPM-Solver++:\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver++\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n - For **guided sampling with large guidance scale** by DPMs:\n Use multistep DPM-Solver with `algorithm_type=\"dpmsolver++\"` and `order = 2`.\n e.g.\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver++\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,\n skip_type='time_uniform', method='multistep')\n\n We support three types of `skip_type`:\n - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**\n - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.\n - 'time_quadratic': quadratic time for the time steps.\n\n =====================================================\n Args:\n x: A pytorch tensor. The initial value at time `t_start`\n e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.\n steps: A `int`. The total number of function evaluations (NFE).\n t_start: A `float`. The starting time of the sampling.\n If `T` is None, we use self.noise_schedule.T (default is 1.0).\n t_end: A `float`. The ending time of the sampling.\n If `t_end` is None, we use 1. / self.noise_schedule.total_N.\n e.g. if total_N == 1000, we have `t_end` == 1e-3.\n For discrete-time DPMs:\n - We recommend `t_end` == 1. / self.noise_schedule.total_N.\n For continuous-time DPMs:\n - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.\n order: A `int`. The order of DPM-Solver.\n skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.\n method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.\n denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.\n Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).\n\n This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and\n score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID\n for diffusion models sampling by diffusion SDEs for low-resolutional images\n (such as CIFAR-10). However, we observed that such trick does not matter for\n high-resolutional images. 
As it needs an additional NFE, we do not recommend\n it for high-resolutional images.\n lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.\n Only valid for `method=multistep` and `steps < 15`. We empirically find that\n this trick is a key to stabilizing the sampling by DPM-Solver with very few steps\n (especially for steps <= 10). So we recommend to set it to be `True`.\n solver_type: A `str`. The taylor expansion type for the solver. `dpmsolver` or `taylor`. We recommend `dpmsolver`.\n atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n return_intermediate: A `bool`. Whether to save the xt at each step.\n When set to `True`, method returns a tuple (x0, intermediates); when set to False, method returns only x0.\n Returns:\n x_end: A pytorch tensor. The approximated solution at time `t_end`.\n\n \"\"\"\n t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end\n t_T = self.noise_schedule.T if t_start is None else t_start\n assert t_0 > 0 and t_T > 0, \"Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array\"\n if return_intermediate:\n assert method in ['multistep', 'singlestep',\n 'singlestep_fixed'], \"Cannot use adaptive solver when saving intermediate values\"\n if self.correcting_xt_fn is not None:\n assert method in ['multistep', 'singlestep',\n 'singlestep_fixed'], \"Cannot use adaptive solver when correcting_xt_fn is not None\"\n device = x.device\n intermediates = []\n with torch.no_grad():\n if method == 'adaptive':\n x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,\n solver_type=solver_type)\n elif method == 'multistep':\n assert steps >= order\n timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)\n assert timesteps.shape[0] - 1 == steps\n # Init the initial values.\n step = 0\n t = timesteps[step]\n t_prev_list = [t]\n model_prev_list = [self.model_fn(x, t)]\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n # Init the first `order` values by lower order multistep DPM-Solver.\n for step in range(1, order):\n t = timesteps[step]\n x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step,\n solver_type=solver_type)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n t_prev_list.append(t)\n model_prev_list.append(self.model_fn(x, t))\n # Compute the remaining values by `order`-th order multistep DPM-Solver.\n for step in tqdm(range(order, steps + 1)):\n t = timesteps[step]\n # We only use lower order for steps < 10\n if lower_order_final and steps < 10:\n step_order = min(order, steps + 1 - step)\n else:\n step_order = order\n x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step_order,\n solver_type=solver_type)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n for i in range(order - 1):\n t_prev_list[i] = t_prev_list[i + 1]\n model_prev_list[i] = model_prev_list[i + 1]\n t_prev_list[-1] = t\n # We do not need to evaluate the final model value.\n if step < steps:\n model_prev_list[-1] = self.model_fn(x, t)\n # comfyui preview\n if 
pbar:\n preview_bytes = None\n if previewer:\n preview_bytes = previewer.decode_latent_to_preview_image(\"JPEG\", x)\n pbar.update_absolute(step, steps, preview_bytes)\n\n elif method in ['singlestep', 'singlestep_fixed']:\n if method == 'singlestep':\n timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps,\n order=order,\n skip_type=skip_type,\n t_T=t_T, t_0=t_0,\n device=device)\n elif method == 'singlestep_fixed':\n K = steps // order\n orders = [order, ] * K\n timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)\n for step, order in enumerate(orders):\n s, t = timesteps_outer[step], timesteps_outer[step + 1]\n timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=s.item(), t_0=t.item(), N=order,\n device=device)\n lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)\n h = lambda_inner[-1] - lambda_inner[0]\n r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h\n r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h\n x = self.singlestep_dpm_solver_update(x, s, t, order, solver_type=solver_type, r1=r1, r2=r2)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n else:\n raise ValueError(\"Got wrong method {}\".format(method))\n if denoise_to_zero:\n t = torch.ones((1,)).to(device) * t_0\n x = self.denoise_to_zero_fn(x, t)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step + 1)\n if return_intermediate:\n intermediates.append(x)\n\n if return_intermediate:\n return x, intermediates\n else:\n return x" }, { "identifier": "NoiseScheduleVP", "path": "PixArt/sampling/dpm_solver.py", "snippet": "class NoiseScheduleVP:\n def __init__(\n self,\n schedule='discrete',\n betas=None,\n alphas_cumprod=None,\n continuous_beta_0=0.1,\n continuous_beta_1=20.,\n dtype=torch.float32,\n ):\n \"\"\"Create a wrapper class for the forward SDE (VP type).\n\n ***\n Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t.\n We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.\n ***\n\n The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).\n We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).\n Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:\n\n log_alpha_t = self.marginal_log_mean_coeff(t)\n sigma_t = self.marginal_std(t)\n lambda_t = self.marginal_lambda(t)\n\n Moreover, as lambda(t) is an invertible function, we also support its inverse function:\n\n t = self.inverse_lambda(lambda_t)\n\n ===============================================================\n\n We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).\n\n 1. For discrete-time DPMs:\n\n For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:\n t_i = (i + 1) / N\n e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.\n We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.\n\n Args:\n betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)\n alphas_cumprod: A `torch.Tensor`. 
The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)\n\n Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.\n\n **Important**: Please pay special attention for the args for `alphas_cumprod`:\n The `alphas_cumprod` is the \\hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that\n q_{t_n | 0}(x_{t_n} | x_0) = N ( \\sqrt{\\hat{alpha_n}} * x_0, (1 - \\hat{alpha_n}) * I ).\n Therefore, the notation \\hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have\n alpha_{t_n} = \\sqrt{\\hat{alpha_n}},\n and\n log(alpha_{t_n}) = 0.5 * log(\\hat{alpha_n}).\n\n\n 2. For continuous-time DPMs:\n\n We support the linear VPSDE for the continuous time setting. The hyperparameters for the noise\n schedule are the default settings in Yang Song's ScoreSDE:\n\n Args:\n beta_min: A `float` number. The smallest beta for the linear schedule.\n beta_max: A `float` number. The largest beta for the linear schedule.\n T: A `float` number. The ending time of the forward process.\n\n ===============================================================\n\n Args:\n schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,\n 'linear' for continuous-time DPMs.\n Returns:\n A wrapper object of the forward SDE (VP type).\n\n ===============================================================\n\n Example:\n\n # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', betas=betas)\n\n # For discrete-time DPMs, given alphas_cumprod (the \\hat{alpha_n} array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)\n\n # For continuous-time DPMs (VPSDE), linear schedule:\n >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)\n\n \"\"\"\n\n if schedule not in ['discrete', 'linear']:\n raise ValueError(\n \"Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear'\".format(schedule))\n\n self.schedule = schedule\n if schedule == 'discrete':\n if betas is not None:\n log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)\n else:\n assert alphas_cumprod is not None\n log_alphas = 0.5 * torch.log(alphas_cumprod)\n self.T = 1.\n self.log_alpha_array = self.numerical_clip_alpha(log_alphas).reshape((1, -1,)).to(dtype=dtype)\n self.total_N = self.log_alpha_array.shape[1]\n self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)).to(dtype=dtype)\n else:\n self.T = 1.\n self.total_N = 1000\n self.beta_0 = continuous_beta_0\n self.beta_1 = continuous_beta_1\n\n def numerical_clip_alpha(self, log_alphas, clipped_lambda=-5.1):\n \"\"\"\n For some beta schedules such as cosine schedule, the log-SNR has numerical isssues.\n We clip the log-SNR near t=T within -5.1 to ensure the stability.\n Such a trick is very useful for diffusion models with the cosine schedule, such as i-DDPM, guided-diffusion and GLIDE.\n \"\"\"\n log_sigmas = 0.5 * torch.log(1. - torch.exp(2. 
* log_alphas))\n lambs = log_alphas - log_sigmas\n idx = torch.searchsorted(torch.flip(lambs, [0]), clipped_lambda)\n if idx > 0:\n log_alphas = log_alphas[:-idx]\n return log_alphas\n\n def marginal_log_mean_coeff(self, t):\n \"\"\"\n Compute log(alpha_t) of a given continuous-time label t in [0, T].\n \"\"\"\n if self.schedule == 'discrete':\n return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),\n self.log_alpha_array.to(t.device)).reshape((-1))\n elif self.schedule == 'linear':\n return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0\n\n def marginal_alpha(self, t):\n \"\"\"\n Compute alpha_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.exp(self.marginal_log_mean_coeff(t))\n\n def marginal_std(self, t):\n \"\"\"\n Compute sigma_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))\n\n def marginal_lambda(self, t):\n \"\"\"\n Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].\n \"\"\"\n log_mean_coeff = self.marginal_log_mean_coeff(t)\n log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))\n return log_mean_coeff - log_std\n\n def inverse_lambda(self, lamb):\n \"\"\"\n Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.\n \"\"\"\n if self.schedule == 'linear':\n tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))\n Delta = self.beta_0 ** 2 + tmp\n return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)\n elif self.schedule == 'discrete':\n log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)\n t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),\n torch.flip(self.t_array.to(lamb.device), [1]))\n return t.reshape((-1,))" } ]
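The NoiseScheduleVP snippet above defines alpha_t, sigma_t and the half-logSNR lambda_t for a discrete schedule, and add_noise builds x_t = alpha_t * x_0 + sigma_t * noise from them. A minimal, self-contained sketch of those relations (plain PyTorch; the linear beta schedule and tensor shapes are made up for illustration and are not the repository's values):

import torch

# Hypothetical linear beta schedule with N = 1000 discrete steps (illustration only).
betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

# Relations stated in the NoiseScheduleVP docstring:
#   alpha_{t_n} = sqrt(alphas_cumprod_n)
#   sigma_t     = sqrt(1 - alpha_t^2)
#   lambda_t    = log(alpha_t) - log(sigma_t)   (the half-logSNR)
log_alpha = 0.5 * torch.log(alphas_cumprod)
alpha_t = torch.exp(log_alpha)
sigma_t = torch.sqrt(1.0 - alpha_t ** 2)
lambda_t = log_alpha - torch.log(sigma_t)

# lambda_t decreases monotonically from high SNR (small t) to low SNR (t near T),
# which is what makes inverse_lambda well defined.
assert torch.all(lambda_t[1:] < lambda_t[:-1])

# Forward noising used by add_noise: x_t = alpha_t * x_0 + sigma_t * eps.
x0 = torch.randn(4, 3, 8, 8)
eps = torch.randn_like(x0)
n = 500  # an arbitrary discrete step
xt = alpha_t[n] * x0 + sigma_t[n] * eps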
import torch import comfy.utils import latent_preview from .sampling import gaussian_diffusion as gd from .sampling.dpm_solver import model_wrapper, DPM_Solver, NoiseScheduleVP from comfy.sample import prepare_sampling, prepare_noise, cleanup_additional_models, get_models_from_cond
20,046
def sample_pixart(model, seed, steps, cfg, noise_schedule, noise_schedule_vp, positive, negative, latent_image): """ Mostly just a wrapper around the reference code. """ # prepare model noise = prepare_noise(latent_image, seed) real_model, _, _, _, models = prepare_sampling(model, noise.shape, positive, negative, noise_mask=None) # negative cond cond = positive[0][0] raw_uncond = negative[0][0] # Sampler seems to want the same dim for cond and uncond # truncate uncond to the length of cond # if shorter, pad uncond with y_null null_y = real_model.diffusion_model.y_embedder.y_embedding[None].repeat(latent_image.shape[0], 1, 1) uncond = null_y[:, :cond.shape[1], :] uncond[:, :raw_uncond.shape[1], :] = raw_uncond[:, :cond.shape[1], :] if raw_uncond.shape[1] > cond.shape[1]: print("PixArt: Warning. Your negative prompt is too long.") uncond[:, -1, :] = raw_uncond[:, -1, :] # add back EOS token # Move inputs cond = cond.to(model.load_device).to(real_model.diffusion_model.dtype) uncond = uncond.to(model.load_device).to(real_model.diffusion_model.dtype) noise = noise.to(model.load_device).to(real_model.diffusion_model.dtype) # preview pbar = comfy.utils.ProgressBar(steps) previewer = latent_preview.get_previewer(model.load_device, model.model.latent_format) ## Noise schedule. betas = torch.tensor(gd.get_named_beta_schedule(noise_schedule, 1000))
def sample_pixart(model, seed, steps, cfg, noise_schedule, noise_schedule_vp, positive, negative, latent_image): """ Mostly just a wrapper around the reference code. """ # prepare model noise = prepare_noise(latent_image, seed) real_model, _, _, _, models = prepare_sampling(model, noise.shape, positive, negative, noise_mask=None) # negative cond cond = positive[0][0] raw_uncond = negative[0][0] # Sampler seems to want the same dim for cond and uncond # truncate uncond to the length of cond # if shorter, pad uncond with y_null null_y = real_model.diffusion_model.y_embedder.y_embedding[None].repeat(latent_image.shape[0], 1, 1) uncond = null_y[:, :cond.shape[1], :] uncond[:, :raw_uncond.shape[1], :] = raw_uncond[:, :cond.shape[1], :] if raw_uncond.shape[1] > cond.shape[1]: print("PixArt: Warning. Your negative prompt is too long.") uncond[:, -1, :] = raw_uncond[:, -1, :] # add back EOS token # Move inputs cond = cond.to(model.load_device).to(real_model.diffusion_model.dtype) uncond = uncond.to(model.load_device).to(real_model.diffusion_model.dtype) noise = noise.to(model.load_device).to(real_model.diffusion_model.dtype) # preview pbar = comfy.utils.ProgressBar(steps) previewer = latent_preview.get_previewer(model.load_device, model.model.latent_format) ## Noise schedule. betas = torch.tensor(gd.get_named_beta_schedule(noise_schedule, 1000))
noise_schedule = NoiseScheduleVP(schedule=noise_schedule_vp, betas=betas)
3
2023-10-20 21:19:44+00:00
24k
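The sample_pixart wrapper in this record pads or truncates the negative embedding so it matches the positive one in sequence length, falling back to the null embedding and re-appending the EOS token when the negative prompt is longer. A small stand-alone sketch of that logic (the helper name, tensor sizes and the zero null embedding are hypothetical, for illustration only):

import torch

def pad_or_truncate_uncond(cond_len: int, raw_uncond: torch.Tensor, null_y: torch.Tensor) -> torch.Tensor:
    # Start from null tokens cut to the positive length, then overwrite with as much
    # of the real negative embedding as fits.
    uncond = null_y[:, :cond_len, :].clone()
    n = min(cond_len, raw_uncond.shape[1])
    uncond[:, :n, :] = raw_uncond[:, :n, :]
    if raw_uncond.shape[1] > cond_len:
        # Negative prompt was truncated: keep its original final (EOS) token.
        uncond[:, -1, :] = raw_uncond[:, -1, :]
    return uncond

cond = torch.randn(1, 120, 4096)
raw_uncond = torch.randn(1, 300, 4096)   # the "negative prompt is too long" case
null_y = torch.zeros(1, 300, 4096)       # stand-in for y_embedder.y_embedding
uncond = pad_or_truncate_uncond(cond.shape[1], raw_uncond, null_y)
assert uncond.shape == cond.shape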
amitfin/oref_alert
custom_components/oref_alert/config_flow.py
[ { "identifier": "CONF_AREAS", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_AREAS: Final = \"areas\"" }, { "identifier": "CONF_ALERT_MAX_AGE", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_ALERT_MAX_AGE: Final = \"alert_max_age\"" }, { "identifier": "CONF_OFF_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_OFF_ICON: Final = \"off_icon\"" }, { "identifier": "CONF_ON_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_ON_ICON: Final = \"on_icon\"" }, { "identifier": "CONF_POLL_INTERVAL", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_POLL_INTERVAL: Final = \"poll_interval\"" }, { "identifier": "DEFAULT_ALERT_MAX_AGE", "path": "custom_components/oref_alert/const.py", "snippet": "DEFAULT_ALERT_MAX_AGE: Final = 10" }, { "identifier": "DOMAIN", "path": "custom_components/oref_alert/const.py", "snippet": "DOMAIN: Final = \"oref_alert\"" }, { "identifier": "DEFAULT_OFF_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "DEFAULT_OFF_ICON: Final = \"mdi:home-outline\"" }, { "identifier": "DEFAULT_ON_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "DEFAULT_ON_ICON: Final = \"mdi:home-alert-outline\"" }, { "identifier": "DEFAULT_POLL_INTERVAL", "path": "custom_components/oref_alert/const.py", "snippet": "DEFAULT_POLL_INTERVAL: Final = 2" }, { "identifier": "TITLE", "path": "custom_components/oref_alert/const.py", "snippet": "TITLE: Final = \"Oref Alert\"" }, { "identifier": "find_area", "path": "custom_components/oref_alert/metadata/area_to_polygon.py", "snippet": "def find_area(lat: float, long: float) -> str | None:\n \"\"\"Find an area using lat/long.\"\"\"\n point = Point(lat, long)\n for area, polygon in _load_area_to_polygon().items():\n if Polygon(polygon).contains(point):\n return area\n return None" }, { "identifier": "AREAS_AND_GROUPS", "path": "custom_components/oref_alert/metadata/areas_and_groups.py", "snippet": "AREAS_AND_GROUPS = [\n \"אבו סנאן\",\n \"אבו קרינאת\",\n \"אבו תלול\",\n \"אבו-גוש\",\n \"אבטליון\",\n \"אביאל\",\n \"אביבים\",\n \"אביגדור\",\n \"אביחיל\",\n \"אביעזר\",\n \"אבירים\",\n \"אבן יהודה\",\n \"אבן מנחם\",\n \"אבן ספיר\",\n \"אבן שמואל\",\n \"אבני איתן\",\n \"אבני חפץ\",\n \"אבנת\",\n \"אבשלום\",\n \"אדורה\",\n \"אדוריים\",\n \"אדמית\",\n \"אדרת\",\n \"אודים\",\n \"אודם\",\n \"אום אל פחם\",\n \"אום אל קוטוף\",\n \"אום אל-גנם\",\n \"אום בטין\",\n \"אופקים\",\n \"אור הגנוז\",\n \"אור הנר\",\n \"אור יהודה\",\n \"אור עקיבא\",\n \"אורה\",\n \"אורון תעשייה ומסחר\",\n \"אורות\",\n \"אורטל\",\n \"אורים\",\n \"אורנים\",\n \"אורנית\",\n \"אושה\",\n \"אזור\",\n \"אזור תעשייה אכזיב מילואות\",\n \"אזור תעשייה אלון התבור\",\n \"אזור תעשייה אפק ולב הארץ\",\n \"אזור תעשייה באר טוביה\",\n \"אזור תעשייה בני יהודה\",\n \"אזור תעשייה בר-לב\",\n \"אזור תעשייה בראון\",\n \"אזור תעשייה ברוש\",\n \"אזור תעשייה דימונה\",\n \"אזור תעשייה הדרומי אשקלון\",\n \"אזור תעשייה הר טוב - צרעה\",\n \"אזור תעשייה חבל מודיעין\",\n \"אזור תעשייה חצור הגלילית\",\n \"אזור תעשייה טירה\",\n \"אזור תעשייה יקנעם עילית\",\n \"אזור תעשייה כנות\",\n \"אזור תעשייה כרמיאל\",\n \"אזור תעשייה מבוא כרמל\",\n \"אזור תעשייה מבואות הגלבוע\",\n \"אזור תעשייה מישור אדומים\",\n \"אזור תעשייה מיתרים\",\n \"אזור תעשייה נ.ע.מ\",\n \"אזור תעשייה ניר עציון\",\n \"אזור תעשייה נשר - רמלה\",\n \"אזור תעשייה עד הלום\",\n \"אזור תעשייה עידן הנגב\",\n \"אזור תעשייה עמק חפר\",\n \"אזור תעשייה צ.ח.ר\",\n \"אזור תעשייה צבאים\",\n \"אזור תעשייה ציפורית\",\n \"אזור תעשייה צמח\",\n \"אזור תעשייה צפוני 
אשקלון\",\n \"אזור תעשייה קדמת גליל\",\n \"אזור תעשייה קיסריה\",\n \"אזור תעשייה קריית גת\",\n \"אזור תעשייה רגבים\",\n \"אזור תעשייה רותם\",\n \"אזור תעשייה רמת דלתון\",\n \"אזור תעשייה שחורת\",\n \"אזור תעשייה שער בנימין\",\n \"אזור תעשייה שער נעמן\",\n \"אזור תעשייה תימורים\",\n \"אזור תעשייה תרדיון\",\n \"אחווה\",\n \"אחוזם\",\n \"אחוזת ברק\",\n \"אחיה\",\n \"אחיהוד\",\n \"אחיטוב\",\n \"אחיסמך\",\n \"אחיעזר\",\n \"איבטין\",\n \"אייל\",\n \"איילת השחר\",\n \"אילון\",\n \"אילות\",\n \"אילניה\",\n \"אילת\",\n \"אירוס\",\n \"איתמר\",\n \"איתן\",\n \"אכסאל\",\n \"אל סייד\",\n \"אל עזי\",\n \"אל עמארני, אל מסק\",\n \"אל עריאן\",\n \"אל פורעה\",\n \"אל רום\",\n \"אל-ח'וואלד מערב\",\n \"אלומה\",\n \"אלומות\",\n \"אלון\",\n \"אלון הגליל\",\n \"אלון מורה\",\n \"אלון שבות\",\n \"אלוני אבא\",\n \"אלוני הבשן\",\n \"אלוני יצחק\",\n \"אלונים\",\n \"אלי עד\",\n \"אליאב\",\n \"אליכין\",\n \"אליפז ומכרות תמנע\",\n \"אליפלט\",\n \"אליקים\",\n \"אלישיב\",\n \"אלישמע\",\n \"אלמגור\",\n \"אלמוג\",\n \"אלעד\",\n \"אלעזר\",\n \"אלפי מנשה\",\n \"אלקוש\",\n \"אלקנה\",\n \"אמונים\",\n \"אמירים\",\n \"אמנון\",\n \"אמץ\",\n \"אמציה\",\n \"אניעם\",\n \"אעבלין\",\n \"אפיק\",\n \"אפיקים\",\n \"אפק\",\n \"אפרת\",\n \"ארבל\",\n \"ארגמן\",\n \"ארז\",\n \"אריאל\",\n \"ארסוף\",\n \"אשבול\",\n \"אשבל\",\n \"אשדוד - א,ב,ד,ה\",\n \"אשדוד - איזור תעשייה צפוני\",\n \"אשדוד - ג,ו,ז\",\n \"אשדוד - ח,ט,י,יג,יד,טז\",\n \"אשדוד - כל האזורים\",\n \"אשדוד -יא,יב,טו,יז,מרינה,סיט\",\n \"אשדות יעקב איחוד\",\n \"אשדות יעקב מאוחד\",\n \"אשחר\",\n \"אשכולות\",\n \"אשל הנשיא\",\n \"אשלים\",\n \"אשקלון - דרום\",\n \"אשקלון - כל האזורים\",\n \"אשקלון - צפון\",\n \"אשרת\",\n \"אשתאול\",\n \"אתר דודאים\",\n \"אתר ההנצחה גולני\",\n \"באקה אל גרבייה\",\n \"באר אורה\",\n \"באר גנים\",\n \"באר טוביה\",\n \"באר יעקב\",\n \"באר מילכה\",\n \"באר שבע - דרום\",\n \"באר שבע - כל האזורים\",\n \"באר שבע - מזרח\",\n \"באר שבע - מערב\",\n \"באר שבע - צפון\",\n \"בארות יצחק\",\n \"בארותיים\",\n \"בארי\",\n \"בוסתן הגליל\",\n \"בועיינה-נוג'ידאת\",\n \"בוקעתא\",\n \"בורגתה\",\n \"בחן\",\n \"בטחה\",\n \"ביצרון\",\n \"ביר אלמכסור\",\n \"ביר הדאג'\",\n \"ביריה\",\n \"בית אורן\",\n \"בית אל\",\n \"בית אלעזרי\",\n \"בית אלפא וחפציבה\",\n \"בית אריה\",\n \"בית ברל\",\n \"בית ג'אן\",\n \"בית גוברין\",\n \"בית גמליאל\",\n \"בית דגן\",\n \"בית הגדי\",\n \"בית הלוי\",\n \"בית הלל\",\n \"בית העמק\",\n \"בית הערבה\",\n \"בית השיטה\",\n \"בית זית\",\n \"בית זרע\",\n \"בית חגי\",\n \"בית חורון\",\n \"בית חזון\",\n \"בית חלקיה\",\n \"בית חנן\",\n \"בית חנניה\",\n \"בית חרות\",\n \"בית חשמונאי\",\n \"בית יהושע\",\n \"בית יוסף\",\n \"בית ינאי\",\n \"בית יצחק - שער חפר\",\n \"בית ירח\",\n \"בית יתיר\",\n \"בית לחם הגלילית\",\n \"בית מאיר\",\n \"בית נחמיה\",\n \"בית ניר\",\n \"בית נקופה\",\n \"בית סוהר השרון\",\n \"בית סוהר מגידו\",\n \"בית סוהר נפחא\",\n \"בית סוהר צלמון\",\n \"בית סוהר קישון\",\n \"בית סוהר שיטה וגלבוע\",\n \"בית ספר אורט בנימינה\",\n \"בית ספר שדה מירון\",\n \"בית עובד\",\n \"בית עוזיאל\",\n \"בית עזרא\",\n \"בית עלמין תל רגב\",\n \"בית עריף\",\n \"בית צבי\",\n \"בית קמה\",\n \"בית קשת\",\n \"בית רימון\",\n \"בית שאן\",\n \"בית שמש\",\n \"בית שערים\",\n \"בית שקמה\",\n \"ביתן אהרן\",\n \"ביתר עילית\",\n \"בלפוריה\",\n \"בן זכאי\",\n \"בן עמי\",\n \"בן שמן\",\n \"בני ברק\",\n \"בני דקלים\",\n \"בני דרום\",\n \"בני דרור\",\n \"בני יהודה וגבעת יואב\",\n \"בני נצרים\",\n \"בני עטרות\",\n \"בני עי''ש\",\n \"בני ציון\",\n \"בני ראם\",\n \"בניה\",\n \"בנימינה\",\n 'בסמ\"ה',\n \"בסמת טבעון\",\n \"בענה\",\n \"בצרה\",\n \"בצת\",\n \"בקוע\",\n \"בקעות\",\n \"בר 
גיורא\",\n \"בר יוחאי\",\n \"ברוכין\",\n \"ברור חיל\",\n \"ברוש\",\n \"ברטעה\",\n \"ברכיה\",\n \"ברעם\",\n \"ברקאי\",\n \"ברקן\",\n \"ברקת\",\n \"בת הדר\",\n \"בת חן\",\n \"בת חפר\",\n \"בת עין\",\n \"בת שלמה\",\n \"בת-ים\",\n \"בתי מלון ים המלח\",\n \"ג'דידה מכר\",\n \"ג'וליס\",\n \"ג'לג'וליה\",\n \"ג'סר א-זרקא\",\n \"ג'ש - גוש חלב\",\n \"ג'ת\",\n \"גאולי תימן\",\n \"גאולים\",\n \"גאליה\",\n \"גבולות\",\n \"גבים, מכללת ספיר\",\n \"גבע בנימין\",\n \"גבע כרמל\",\n \"גבעון החדשה\",\n \"גבעות\",\n \"גבעות בר\",\n \"גבעות גורל\",\n \"גבעות עדן\",\n \"גבעת אבני\",\n \"גבעת אלה\",\n \"גבעת אסף\",\n \"גבעת ברנר\",\n \"גבעת הראל וגבעת הרואה\",\n \"גבעת השלושה\",\n \"גבעת וולפסון\",\n \"גבעת וושינגטון\",\n \"גבעת זאב\",\n \"גבעת חביבה\",\n \"גבעת חיים איחוד\",\n \"גבעת חיים מאוחד\",\n \"גבעת חן\",\n \"גבעת יערים\",\n \"גבעת ישעיהו\",\n \"גבעת כ''ח\",\n \"גבעת ניל''י\",\n \"גבעת עדה\",\n \"גבעת עוז\",\n \"גבעת שמואל\",\n \"גבעת שפירא\",\n \"גבעתי\",\n \"גבעתיים\",\n \"גברעם\",\n \"גבת\",\n \"גדות\",\n \"גדיש\",\n \"גדעונה\",\n \"גדרה\",\n \"גונן\",\n \"גורן\",\n \"גורנות הגליל\",\n \"גזית\",\n \"גזר\",\n \"גיאה\",\n \"גיבתון\",\n \"גיזו\",\n \"גילת\",\n \"גינוסר\",\n \"גינתון\",\n \"גיתה\",\n \"גיתית\",\n \"גלאון\",\n \"גלגל\",\n \"גלעד\",\n \"גמזו\",\n \"גן הדרום\",\n \"גן השומרון\",\n \"גן חיים\",\n \"גן יאשיה\",\n \"גן יבנה\",\n \"גן נר\",\n \"גן שורק\",\n \"גן שלמה\",\n \"גן שמואל\",\n \"גנות\",\n \"גנות הדר\",\n \"גני הדר\",\n \"גני טל\",\n \"גני יוחנן\",\n \"גני מודיעין\",\n \"גני עם\",\n \"גני תקווה\",\n \"גניגר\",\n \"געש\",\n \"געתון\",\n \"גפן\",\n \"גרופית\",\n \"גשור\",\n \"גשר\",\n \"גשר הזיו\",\n \"גת\",\n \"גת רימון\",\n \"דבוריה\",\n \"דביר\",\n \"דברת\",\n \"דגניה א\",\n \"דגניה ב\",\n \"דוב''ב\",\n \"דולב\",\n \"דור\",\n \"דורות\",\n \"דחי\",\n \"דימונה\",\n \"דיר אל-אסד\",\n \"דיר חנא\",\n \"דישון\",\n \"דליה\",\n \"דלית אל כרמל\",\n \"דלתון\",\n \"דמיידה\",\n \"דניאל\",\n \"דפנה\",\n \"דקל\",\n \"האון\",\n \"הבונים\",\n \"הגושרים\",\n \"הדר עם\",\n \"הוד השרון\",\n \"הודיה\",\n \"הודיות\",\n \"הושעיה\",\n \"הזורעים\",\n \"החותרים\",\n \"היוגב\",\n \"הילה\",\n \"המכללה האקדמית כנרת\",\n \"המעפיל\",\n \"המרכז האקדמי רופין\",\n \"הסוללים\",\n \"העוגן\",\n \"הר אדר\",\n \"הר ברכה\",\n \"הר גילה\",\n \"הר הנגב\",\n \"הר עמשא\",\n \"הר-חלוץ\",\n \"הראל\",\n \"הרדוף\",\n \"הרצליה - כל האזורים\",\n \"הרצליה - מערב\",\n \"הרצליה - מרכז וגליל ים\",\n \"הררית יחד\",\n \"ואדי אל חמאם\",\n \"ואדי אל נעם דרום\",\n \"ורד יריחו\",\n \"ורדון\",\n \"זבדיאל\",\n \"זוהר\",\n \"זיקים\",\n \"זיתן\",\n \"זכרון יעקב\",\n \"זכריה\",\n \"זמר\",\n \"זמרת, שובה\",\n \"זנוח\",\n \"זרועה\",\n \"זרזיר\",\n \"זרחיה\",\n \"זרעית\",\n \"ח'וואלד\",\n \"חבצלת השרון וצוקי ים\",\n \"חברון\",\n \"חג'אג'רה\",\n \"חגור\",\n \"חגלה\",\n \"חד נס\",\n \"חדיד\",\n \"חדרה - כל האזורים\",\n \"חדרה - מזרח\",\n \"חדרה - מערב\",\n \"חדרה - מרכז\",\n \"חדרה - נווה חיים\",\n \"חוות גלעד\",\n \"חוות יאיר\",\n \"חוות עדן\",\n \"חוות ערנדל\",\n \"חוות שדה בר\",\n \"חוות שיקמים\",\n \"חולדה\",\n \"חולון\",\n \"חולית\",\n \"חולתה\",\n \"חוסן\",\n \"חוסנייה\",\n \"חופית\",\n \"חוקוק\",\n \"חורה\",\n \"חורפיש\",\n \"חורשים\",\n \"חזון\",\n \"חי-בר יטבתה\",\n \"חיבת ציון\",\n \"חיננית\",\n \"חיפה - כל האזורים\",\n \"חיפה - כרמל ועיר תחתית\",\n \"חיפה - מערב\",\n \"חיפה - נווה שאנן ורמות כרמל\",\n \"חיפה - קריית חיים ושמואל\",\n \"חיפה-מפרץ\",\n \"חירן\",\n \"חלמיש\",\n \"חלץ\",\n \"חמד\",\n \"חמדיה\",\n \"חמדת\",\n \"חמרה\",\n \"חמת גדר\",\n \"חניאל\",\n \"חניתה\",\n \"חנתון\",\n \"חספין\",\n \"חפץ חיים\",\n \"חצב\",\n \"חצבה\",\n 
\"חצור\",\n \"חצור הגלילית\",\n \"חצרים\",\n \"חרב לאת\",\n \"חרוצים\",\n \"חרות\",\n \"חריש\",\n \"חרמש\",\n \"חרשה\",\n \"חרשים\",\n \"חשמונאים\",\n \"טבריה\",\n \"טובא זנגריה\",\n \"טורעאן\",\n \"טייבה\",\n \"טייבה בגלבוע\",\n \"טירה\",\n \"טירת יהודה\",\n \"טירת כרמל\",\n \"טירת צבי\",\n \"טל מנשה\",\n \"טל שחר\",\n \"טל-אל\",\n \"טללים\",\n \"טלמון\",\n \"טמרה\",\n \"טמרה בגלבוע\",\n \"טנא עומרים\",\n \"טפחות\",\n \"יבול\",\n \"יבנאל\",\n \"יבנה\",\n \"יגור\",\n \"יגל\",\n \"יד בנימין\",\n \"יד השמונה\",\n \"יד חנה\",\n \"יד מרדכי\",\n \"יד נתן\",\n \"יד רמב''ם\",\n \"יהוד-מונוסון\",\n \"יהל\",\n \"יובלים\",\n \"יודפת\",\n \"יונתן\",\n \"יושיביה\",\n \"יזרעאל\",\n \"יחיעם\",\n \"יטבתה\",\n \"ייט''ב\",\n \"יכיני\",\n \"ינוב\",\n \"ינוח-ג'ת\",\n \"ינון\",\n \"יסוד המעלה\",\n \"יסודות\",\n \"יסעור\",\n \"יעד\",\n \"יעף\",\n \"יערה\",\n \"יערות הכרמל\",\n \"יפיע\",\n \"יפית\",\n \"יפעת\",\n \"יפתח\",\n \"יצהר\",\n \"יציץ\",\n \"יקום\",\n \"יקיר\",\n \"יקנעם המושבה והזורע\",\n \"יקנעם עילית\",\n \"יראון\",\n \"ירדנה\",\n \"ירוחם\",\n \"ירושלים - אזור תעשייה עטרות\",\n \"ירושלים - דרום\",\n \"ירושלים - כל האזורים\",\n \"ירושלים - כפר עקב\",\n \"ירושלים - מזרח\",\n \"ירושלים - מערב\",\n \"ירושלים - מרכז\",\n \"ירושלים - צפון\",\n \"ירחיב\",\n \"ירכא\",\n \"ירקונה\",\n \"ישובי אומן\",\n \"ישובי יעל\",\n \"ישעי\",\n \"ישרש\",\n \"יתד\",\n \"כאבול\",\n \"כאוכב אבו אלהיג'א\",\n \"כברי\",\n \"כדורי\",\n \"כוכב השחר\",\n \"כוכב יאיר - צור יגאל\",\n \"כוכב יעקב\",\n \"כוכב מיכאל\",\n \"כורזים ורד הגליל\",\n \"כושי רמון\",\n \"כחל\",\n \"כינרת מושבה\",\n \"כינרת קבוצה\",\n \"כיסופים\",\n \"כיסרא סמיע\",\n \"כישור\",\n \"כלא דמון\",\n \"כליל\",\n \"כלנית\",\n \"כמהין\",\n \"כמון\",\n \"כנות\",\n \"כנף\",\n \"כסייפה\",\n \"כסלון\",\n \"כעביה\",\n \"כעביה טבאש\",\n \"כפר אביב\",\n \"כפר אדומים\",\n \"כפר אוריה\",\n \"כפר אחים\",\n \"כפר אלדד\",\n \"כפר ביאליק\",\n \"כפר ביל''ו\",\n \"כפר בלום\",\n \"כפר בן נון\",\n \"כפר ברא\",\n \"כפר ברוך\",\n \"כפר גדעון\",\n \"כפר גלים\",\n \"כפר גליקסון\",\n \"כפר גלעדי\",\n \"כפר גמילה מלכישוע\",\n \"כפר דניאל\",\n \"כפר האורנים\",\n \"כפר החורש\",\n \"כפר המכבי\",\n \"כפר הנגיד\",\n \"כפר הנוער ימין אורד\",\n \"כפר הנשיא\",\n \"כפר הס\",\n \"כפר הרא''ה\",\n \"כפר הרי''ף וצומת ראם\",\n \"כפר ויתקין\",\n \"כפר ורבורג\",\n \"כפר ורדים\",\n \"כפר זוהרים\",\n \"כפר זיתים\",\n \"כפר חב''ד\",\n \"כפר חיטים\",\n \"כפר חיים\",\n \"כפר חנניה\",\n \"כפר חסידים\",\n \"כפר חרוב\",\n \"כפר טבאש\",\n \"כפר טרומן\",\n \"כפר ידידיה\",\n \"כפר יהושע\",\n \"כפר יובל\",\n \"כפר יונה\",\n \"כפר יחזקאל\",\n \"כפר יסיף\",\n \"כפר יעבץ\",\n \"כפר כמא\",\n \"כפר כנא\",\n \"כפר מונש\",\n \"כפר מימון ותושיה\",\n \"כפר מל''ל\",\n \"כפר מנדא\",\n \"כפר מנחם\",\n \"כפר מסריק\",\n \"כפר מצר\",\n \"כפר מרדכי\",\n \"כפר נהר הירדן\",\n \"כפר נוער בן שמן\",\n \"כפר נטר\",\n \"כפר סאלד\",\n \"כפר סבא\",\n \"כפר סילבר\",\n \"כפר סירקין\",\n \"כפר עבודה\",\n \"כפר עזה\",\n \"כפר עציון\",\n \"כפר פינס\",\n \"כפר קאסם\",\n \"כפר קיש\",\n \"כפר קרע\",\n \"כפר רופין\",\n \"כפר רות\",\n \"כפר שמאי\",\n \"כפר שמואל\",\n \"כפר שמריהו\",\n \"כפר תבור\",\n \"כפר תפוח\",\n \"כפר תקווה\",\n \"כרכום\",\n \"כרם ביבנה\",\n \"כרם בן זמרה\",\n \"כרם בן שמן\",\n \"כרם מהר''ל\",\n \"כרם שלום\",\n \"כרמי יוסף\",\n \"כרמי צור\",\n \"כרמי קטיף\",\n \"כרמיאל\",\n \"כרמיה\",\n \"כרמים\",\n \"כרמית\",\n \"כרמל\",\n \"לבון\",\n \"לביא\",\n \"לבנים\",\n \"להב\",\n \"להבות הבשן\",\n \"להבות חביבה\",\n \"להבים\",\n \"לוד\",\n \"לוזית\",\n \"לוחמי הגטאות\",\n \"לוטם וחמדון\",\n \"לוטן\",\n \"לטרון\",\n \"לימן\",\n \"לכיש\",\n 
\"לפיד\",\n \"לפידות\",\n \"לקיה\",\n \"מאור\",\n \"מאיר שפיה\",\n \"מבוא ביתר\",\n \"מבוא דותן\",\n \"מבוא חורון\",\n \"מבוא חמה\",\n \"מבוא מודיעים\",\n \"מבואות יריחו\",\n \"מבועים\",\n \"מבטחים, עמיעוז, ישע\",\n \"מבקיעים\",\n \"מבשרת ציון\",\n \"מג'דל כרום\",\n \"מג'דל שמס\",\n \"מגדים\",\n \"מגדל\",\n \"מגדל העמק\",\n \"מגדל עוז\",\n \"מגדל תפן\",\n \"מגדלים\",\n \"מגל\",\n \"מגן\",\n \"מגן שאול\",\n \"מגרון\",\n \"מגשימים\",\n \"מדרך עוז\",\n \"מדרשת בן גוריון\",\n \"מודיעין\",\n \"מודיעין - ישפרו סנטר\",\n \"מודיעין - ליגד סנטר\",\n \"מודיעין עילית\",\n \"מולדת\",\n \"מועאוויה\",\n \"מוצא עילית\",\n \"מוקיבלה\",\n \"מורן\",\n \"מורשת\",\n \"מזור\",\n \"מזכרת בתיה\",\n \"מזרע\",\n \"מזרעה\",\n \"מחוז אילת\",\n \"מחוז בקעה\",\n \"מחוז בקעת בית שאן\",\n \"מחוז גולן דרום\",\n \"מחוז גולן צפון\",\n \"מחוז גליל עליון\",\n \"מחוז גליל תחתון\",\n \"מחוז דן\",\n \"מחוז דרום הנגב\",\n \"מחוז הכרמל\",\n \"מחוז המפרץ\",\n \"מחוז העמקים\",\n \"מחוז השפלה\",\n \"מחוז ואדי ערה\",\n \"מחוז יהודה\",\n \"מחוז ים המלח\",\n \"מחוז ירושלים\",\n \"מחוז ירקון\",\n \"מחוז לכיש\",\n \"מחוז מנשה\",\n \"מחוז מערב הנגב\",\n \"מחוז מערב לכיש\",\n \"מחוז מרכז הגליל\",\n \"מחוז מרכז הנגב\",\n \"מחוז עוטף עזה\",\n \"מחוז ערבה\",\n \"מחוז קו העימות\",\n \"מחוז שומרון\",\n \"מחוז שפלת יהודה\",\n \"מחוז שרון\",\n \"מחולה\",\n \"מחניים\",\n \"מחסיה\",\n \"מטווח ניר עם\",\n \"מטולה\",\n \"מטע\",\n \"מי עמי\",\n \"מייסר\",\n \"מיצד\",\n \"מיצר\",\n \"מירב\",\n \"מירון\",\n \"מישר\",\n \"מיתר\",\n \"מכון וינגייט\",\n \"מכורה\",\n \"מכמורת\",\n \"מכמנים\",\n \"מלאה\",\n \"מלונות ים המלח מרכז\",\n \"מלכיה\",\n \"ממשית\",\n \"מנוחה\",\n \"מנוף\",\n \"מנות\",\n \"מנחמיה\",\n \"מנחת מחניים\",\n \"מנרה\",\n \"מנשית זבדה\",\n \"מסד\",\n \"מסדה\",\n \"מסילות\",\n \"מסילת ציון\",\n \"מסלול\",\n \"מסעדה\",\n \"מע'אר\",\n \"מעברות\",\n \"מעגלים, גבעולים, מלילות\",\n \"מעגן\",\n \"מעגן מיכאל\",\n \"מעוז חיים\",\n \"מעון\",\n \"מעון צופיה\",\n \"מעונה\",\n \"מעיין ברוך\",\n \"מעיין צבי\",\n \"מעיליא\",\n \"מעלה אדומים\",\n \"מעלה אפרים\",\n \"מעלה גלבוע\",\n \"מעלה גמלא\",\n \"מעלה החמישה\",\n \"מעלה חבר\",\n \"מעלה לבונה\",\n \"מעלה מכמש\",\n \"מעלה עירון\",\n \"מעלה עמוס\",\n \"מעלה צביה\",\n \"מעלה רחבעם\",\n \"מעלה שומרון\",\n \"מעלות תרשיחא\",\n \"מענית\",\n \"מעש\",\n \"מפלסים\",\n \"מצדה\",\n \"מצובה\",\n \"מצוקי דרגות\",\n \"מצליח\",\n \"מצפה\",\n \"מצפה אבי''ב\",\n \"מצפה אילן\",\n \"מצפה יריחו\",\n \"מצפה נטופה\",\n \"מצפה רמון\",\n \"מצפה שלם\",\n \"מצר\",\n \"מקווה ישראל\",\n \"מרגליות\",\n \"מרום גולן\",\n \"מרחב עם\",\n \"מרחביה מושב\",\n \"מרחביה קיבוץ\",\n \"מרחצאות עין גדי\",\n \"מרכז אומן\",\n \"מרכז אזורי דרום השרון\",\n \"מרכז אזורי מבואות חרמון\",\n \"מרכז אזורי מגילות\",\n \"מרכז אזורי מרום גליל\",\n \"מרכז אזורי משגב\",\n \"מרכז חבר\",\n \"מרכז ימי קיסריה\",\n \"מרכז מיר''ב\",\n \"מרכז שפירא\",\n \"מרעית\",\n \"משאבי שדה\",\n \"משגב דב\",\n \"משגב עם\",\n \"משהד\",\n \"משואה\",\n \"משואות יצחק\",\n \"משכיות\",\n \"משמר איילון\",\n \"משמר דוד\",\n \"משמר הירדן\",\n \"משמר הנגב\",\n \"משמר העמק\",\n \"משמר השבעה\",\n \"משמר השרון\",\n \"משמרות\",\n \"משמרת\",\n \"משען\",\n \"מתחם בני דרום\",\n \"מתחם פי גלילות\",\n \"מתחם צומת שוקת\",\n \"מתן\",\n \"מתת\",\n \"מתתיהו\",\n \"נאות גולן\",\n \"נאות הכיכר\",\n \"נאות חובב\",\n \"נאות מרדכי\",\n \"נאות סמדר\",\n \"נבטים\",\n \"נבי סמואל\",\n \"נגבה\",\n \"נגוהות\",\n \"נהורה\",\n \"נהלל\",\n \"נהריה\",\n \"נוב\",\n \"נוגה\",\n \"נוה איתן\",\n \"נווה\",\n \"נווה אור\",\n \"נווה אטי''ב\",\n \"נווה אילן\",\n \"נווה דניאל\",\n \"נווה זוהר\",\n \"נווה זיו\",\n \"נווה 
חריף\",\n \"נווה ים\",\n \"נווה ימין\",\n \"נווה ירק\",\n \"נווה מבטח\",\n \"נווה מיכאל - רוגלית\",\n \"נווה שלום\",\n \"נועם\",\n \"נוף איילון\",\n \"נוף הגליל\",\n \"נופי נחמיה\",\n \"נופי פרת\",\n \"נופים\",\n \"נופית\",\n \"נופך\",\n \"נוקדים\",\n \"נורדיה\",\n \"נורית\",\n \"נחושה\",\n \"נחל עוז\",\n \"נחלה\",\n \"נחליאל\",\n \"נחלים\",\n \"נחם\",\n \"נחף\",\n \"נחשולים\",\n \"נחשון\",\n \"נחשונים\",\n \"נטועה\",\n \"נטור\",\n \"נטע\",\n \"נטעים\",\n \"נטף\",\n \"ניל''י\",\n \"נין\",\n \"ניצן\",\n \"ניצנה\",\n \"ניצני עוז\",\n \"ניצנים\",\n \"ניר אליהו\",\n \"ניר בנים\",\n \"ניר גלים\",\n \"ניר דוד\",\n \"ניר ח''ן\",\n \"ניר יפה\",\n \"ניר יצחק\",\n \"ניר ישראל\",\n \"ניר משה\",\n \"ניר עוז\",\n \"ניר עציון\",\n \"ניר עקיבא\",\n \"ניר צבי\",\n \"נירים\",\n \"נירית\",\n \"נמרוד\",\n \"נס הרים\",\n \"נס עמים\",\n \"נס ציונה\",\n \"נעורה\",\n \"נעורים\",\n \"נעלה\",\n \"נעמה\",\n \"נען\",\n \"נערן\",\n \"נצר חזני\",\n \"נצר סרני\",\n \"נצרת\",\n \"נריה\",\n \"נשר\",\n \"נתיב הגדוד\",\n \"נתיב הל''ה\",\n \"נתיב העשרה\",\n \"נתיב השיירה\",\n \"נתיבות\",\n \"נתניה - כל האזורים\",\n \"נתניה - מזרח\",\n \"נתניה - מערב\",\n \"סאג'ור\",\n \"סאסא\",\n \"סביון\",\n \"סגולה\",\n \"סואעד חמירה\",\n \"סולם\",\n \"סוסיא\",\n \"סופה\",\n \"סינמה סיטי גלילות\",\n \"סכנין\",\n \"סלמה\",\n \"סלעית\",\n \"סמר\",\n \"סנדלה\",\n \"סנסנה\",\n \"סעד\",\n \"סעייה-מולדה\",\n \"סער\",\n \"ספיר\",\n \"ספסופה - כפר חושן\",\n \"סתריה\",\n \"ע'ג'ר\",\n \"עבדון\",\n \"עבדת\",\n \"עברון\",\n \"עגור\",\n \"עדי\",\n \"עדי עד\",\n \"עדנים\",\n \"עוזה\",\n \"עוזייר\",\n \"עולש\",\n \"עומר\",\n \"עופר\",\n \"עופרים\",\n \"עוצם\",\n \"עזוז\",\n \"עזר\",\n \"עזריאל\",\n \"עזריה\",\n \"עזריקם\",\n \"עטרת\",\n \"עידן\",\n \"עיינות\",\n \"עילבון\",\n \"עילוט\",\n \"עין איילה\",\n \"עין אל אסד\",\n \"עין אל-סהלה\",\n \"עין בוקק\",\n \"עין גב\",\n \"עין גדי\",\n \"עין דור\",\n \"עין הבשור\",\n \"עין הוד\",\n \"עין החורש\",\n \"עין המפרץ\",\n \"עין הנצי''ב\",\n \"עין העמק\",\n \"עין השופט\",\n \"עין השלושה\",\n \"עין ורד\",\n \"עין זיוון\",\n \"עין חוד\",\n \"עין חצבה\",\n \"עין חרוד\",\n \"עין חרוד איחוד\",\n \"עין יהב\",\n \"עין יעקב\",\n \"עין כמונים\",\n \"עין כרמל\",\n \"עין מאהל\",\n \"עין נקובא\",\n \"עין עירון\",\n \"עין צורים\",\n \"עין קנייא\",\n \"עין ראפה\",\n \"עין שמר\",\n \"עין שריד\",\n \"עין תמר\",\n \"עינבר\",\n \"עינת\",\n \"עיר אובות\",\n \"עכו\",\n \"עכו - אזור תעשייה\",\n \"עלומים\",\n \"עלי\",\n \"עלי זהב\",\n \"עלמה\",\n \"עלמון\",\n \"עמוקה\",\n \"עמיחי\",\n \"עמינדב\",\n \"עמיעד\",\n \"עמיקם\",\n \"עמיר\",\n \"עמנואל\",\n \"עמקה\",\n \"ענב\",\n \"עספיא\",\n \"עפולה\",\n \"עפרה\",\n \"עץ אפרים\",\n \"עצמון - שגב\",\n \"עראבה\",\n \"ערב אל עראמשה\",\n \"ערב אל-נעים\",\n \"ערד\",\n \"ערוגות\",\n \"ערערה\",\n \"ערערה בנגב\",\n \"עשאהל\",\n \"עשרת\",\n \"עתלית\",\n \"עתניאל\",\n \"פארן\",\n \"פארק תעשיות פלמחים\",\n \"פארק תעשייה ראם\",\n \"פדואל\",\n \"פדויים\",\n \"פדיה\",\n \"פוריה כפר עבודה\",\n \"פוריה נווה עובד\",\n \"פוריה עילית\",\n \"פוריידיס\",\n \"פורת\",\n \"פטיש\",\n \"פלך\",\n \"פלמחים\",\n \"פני קדם\",\n \"פנימיית עין כרם\",\n \"פסגות\",\n \"פסוטה\",\n \"פעמי תש''ז\",\n \"פצאל\",\n \"פקיעין\",\n \"פקיעין החדשה\",\n \"פרדס חנה-כרכור\",\n \"פרדסיה\",\n \"פרוד\",\n \"פרי גן\",\n \"פתח תקווה\",\n \"פתחיה\",\n \"צאלים\",\n \"צבעון\",\n \"צובה\",\n \"צוחר, אוהד\",\n \"צופים\",\n \"צופית\",\n \"צופר\",\n \"צוקים\",\n \"צור הדסה\",\n \"צור יצחק\",\n \"צור משה\",\n \"צור נתן\",\n \"צוריאל\",\n \"צורית גילון\",\n \"ציפורי\",\n \"צלפון\",\n \"צפריה\",\n \"צפרירים\",\n \"צפת\",\n \"צרופה\",\n \"צרעה\",\n 
\"קבוצת גבע\",\n \"קבוצת יבנה\",\n \"קדומים\",\n \"קדימה-צורן\",\n \"קדיתא\",\n \"קדמה\",\n \"קדמת צבי\",\n \"קדר\",\n \"קדרון\",\n \"קדרים\",\n \"קדש ברנע\",\n \"קוממיות\",\n \"קורנית\",\n \"קטורה\",\n \"קיבוץ דן\",\n \"קיבוץ מגידו\",\n \"קידה\",\n \"קיסריה\",\n \"קלחים\",\n \"קליה\",\n \"קלנסווה\",\n \"קלע\",\n \"קציר\",\n \"קצר-א-סיר\",\n \"קצרין\",\n \"קצרין - אזור תעשייה\",\n \"קריית אונו\",\n \"קריית אתא\",\n \"קריית ביאליק\",\n \"קריית גת, כרמי גת\",\n \"קריית חינוך מרחבים\",\n \"קריית טבעון-בית זייד\",\n \"קריית ים\",\n \"קריית יערים\",\n \"קריית מוצקין\",\n \"קריית מלאכי\",\n \"קריית נטפים\",\n \"קריית ענבים\",\n \"קריית עקרון\",\n \"קריית שמונה\",\n \"קרית ארבע\",\n \"קרני שומרון\",\n \"קשת\",\n \"ראמה\",\n \"ראס אל-עין\",\n \"ראס עלי\",\n \"ראש הנקרה\",\n \"ראש העין\",\n \"ראש פינה\",\n \"ראש צורים\",\n \"ראשון לציון - כל האזורים\",\n \"ראשון לציון - מזרח\",\n \"ראשון לציון - מערב\",\n \"רבבה\",\n \"רבדים\",\n \"רביבים\",\n \"רביד\",\n \"רגבה\",\n \"רגבים\",\n \"רהט\",\n \"רווחה\",\n \"רוויה\",\n \"רוחמה\",\n \"רומאנה\",\n \"רומת אל הייב\",\n \"רועי\",\n \"רותם\",\n \"רחוב\",\n \"רחובות\",\n \"רחלים\",\n \"רטורנו - גבעת שמש\",\n \"ריחאנייה\",\n \"ריחן\",\n \"ריינה\",\n \"רימונים\",\n \"רינתיה\",\n \"רכסים\",\n \"רם און\",\n \"רמות\",\n \"רמות השבים\",\n \"רמות מאיר\",\n \"רמות מנשה\",\n \"רמות נפתלי\",\n \"רמלה\",\n \"רמת גן - כל האזורים\",\n \"רמת גן - מזרח\",\n \"רמת גן - מערב\",\n \"רמת דוד\",\n \"רמת הכובש\",\n \"רמת הנדיב\",\n \"רמת השופט\",\n \"רמת השרון\",\n \"רמת יוחנן\",\n \"רמת ישי\",\n \"רמת מגשימים\",\n \"רמת צבי\",\n \"רמת רזיאל\",\n \"רמת רחל\",\n \"רנן\",\n \"רעים\",\n \"רעננה\",\n \"רקפת\",\n \"רשפון\",\n \"רשפים\",\n \"רתמים\",\n \"שאנטי במדבר\",\n \"שאר ישוב\",\n \"שבות רחל\",\n \"שבי דרום\",\n \"שבי ציון\",\n \"שבי שומרון\",\n \"שבלי\",\n \"שגב שלום\",\n \"שדה אברהם\",\n \"שדה אילן\",\n \"שדה אליהו\",\n \"שדה אליעזר\",\n \"שדה בוקר\",\n \"שדה דוד\",\n \"שדה ורבורג\",\n \"שדה יואב\",\n \"שדה יעקב\",\n \"שדה יצחק\",\n \"שדה משה\",\n \"שדה נחום\",\n \"שדה נחמיה\",\n \"שדה ניצן\",\n \"שדה עוזיהו\",\n \"שדה צבי\",\n \"שדות ים\",\n \"שדות מיכה\",\n \"שדי חמד\",\n \"שדי תרומות\",\n \"שדמה\",\n \"שדמות דבורה\",\n \"שדמות מחולה\",\n \"שדרות, איבים, ניר עם\",\n \"שהם\",\n \"שואבה\",\n \"שובל\",\n \"שומרה\",\n \"שומריה\",\n \"שומרת\",\n \"שוקדה\",\n \"שורש\",\n \"שורשים\",\n \"שושנת העמקים\",\n \"שזור\",\n \"שחר\",\n \"שחרות\",\n \"שיבולים\",\n \"שיטים\",\n \"שייח' דנון\",\n \"שילה\",\n \"שילת\",\n \"שכניה\",\n \"שלווה\",\n \"שלוחות\",\n \"שלומי\",\n \"שלומית\",\n \"שלפים\",\n \"שמיר\",\n \"שמעה\",\n \"שמשית\",\n \"שני ליבנה\",\n \"שניר\",\n \"שעב\",\n \"שעל\",\n \"שעלבים\",\n \"שער אפרים\",\n \"שער הגולן\",\n \"שער העמקים\",\n \"שער מנשה\",\n \"שערי תקווה\",\n \"שפיים\",\n \"שפיר\",\n \"שפר\",\n \"שפרעם\",\n \"שקד\",\n \"שקף\",\n \"שרונה\",\n \"שריגים - ליאון\",\n \"שריד\",\n \"שרשרת\",\n \"שתולה\",\n \"שתולים\",\n \"תארבין\",\n \"תאשור\",\n \"תדהר\",\n \"תובל\",\n \"תומר\",\n \"תחנת רכבת כפר יהושוע\",\n \"תחנת רכבת ראש העין\",\n \"תימורים\",\n \"תירוש\",\n \"תל אביב - דרום העיר ויפו\",\n \"תל אביב - כל האזורים\",\n \"תל אביב - מזרח\",\n \"תל אביב - מרכז העיר\",\n \"תל אביב - עבר הירקון\",\n \"תל חי\",\n \"תל יוסף\",\n \"תל יצחק\",\n \"תל מונד\",\n \"תל עדשים\",\n \"תל ערד\",\n \"תל ציון\",\n \"תל קציר\",\n \"תל שבע\",\n \"תל תאומים\",\n \"תלם\",\n \"תלמי אליהו\",\n \"תלמי אלעזר\",\n \"תלמי ביל''ו\",\n \"תלמי יוסף\",\n \"תלמי יחיאל\",\n \"תלמי יפה\",\n \"תלמים\",\n \"תמרת\",\n \"תנובות\",\n \"תעוז\",\n \"תעשיון חצב\",\n \"תעשיון צריפין\",\n \"תפרח\",\n \"תקומה\",\n 
\"תקומה וחוות יזרעם\",\n \"תקוע\",\n \"תרום\",\n]" } ]
import contextlib import voluptuous as vol import homeassistant.helpers.config_validation as cv from typing import Any from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow from homeassistant.core import async_get_hass, callback from homeassistant.data_entry_flow import FlowResult from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers import selector from .const import ( CONF_AREAS, CONF_ALERT_MAX_AGE, CONF_OFF_ICON, CONF_ON_ICON, CONF_POLL_INTERVAL, DEFAULT_ALERT_MAX_AGE, DOMAIN, DEFAULT_OFF_ICON, DEFAULT_ON_ICON, DEFAULT_POLL_INTERVAL, TITLE, ) from .metadata.area_to_polygon import find_area from .metadata.areas_and_groups import AREAS_AND_GROUPS
18,574
"""Config flow for oref_alert integration.""" from __future__ import annotations AREAS_CONFIG = selector.SelectSelectorConfig( options=AREAS_AND_GROUPS, mode=selector.SelectSelectorMode.DROPDOWN, multiple=True, custom_value=False, ) CONFIG_SCHEMA = vol.Schema( {vol.Required(CONF_AREAS, default=[]): selector.SelectSelector(AREAS_CONFIG)} )
"""Config flow for oref_alert integration.""" from __future__ import annotations AREAS_CONFIG = selector.SelectSelectorConfig( options=AREAS_AND_GROUPS, mode=selector.SelectSelectorMode.DROPDOWN, multiple=True, custom_value=False, ) CONFIG_SCHEMA = vol.Schema( {vol.Required(CONF_AREAS, default=[]): selector.SelectSelector(AREAS_CONFIG)} )
class OrefAlertConfigFlow(ConfigFlow, domain=DOMAIN):
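The CONFIG_SCHEMA in this record validates the selected areas through a Home Assistant SelectSelector. Leaving the selector machinery aside, a plain voluptuous sketch of the kind of check it performs (the two-item area list is a tiny made-up subset of AREAS_AND_GROUPS, for illustration only):

import voluptuous as vol

KNOWN_AREAS = ["תל אביב - מרכז העיר", "חיפה - מערב"]  # illustrative subset only
schema = vol.Schema({vol.Required("areas", default=[]): [vol.In(KNOWN_AREAS)]})

print(schema({"areas": ["חיפה - מערב"]}))   # accepted as-is
try:
    schema({"areas": ["no such area"]})
except vol.Invalid as err:
    print("rejected:", err)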
6
2023-10-18 11:16:41+00:00
24k
RobertCsordas/moe
tasks/simple/language_model/transformer_lm_mixin.py
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: bool, layers: List[torch.nn.Module], n_prev_states: int,\n n_prev_states_test: Optional[int] = None, adaptive_cutoffs: List[int] = [],\n same_length_eval: bool = True, norm_before_output: bool = False,\n p_drop_layer: float = 0.0, use_last_state: bool = False, same_length: bool = False,\n output_mode: str = \"normal\"):\n\n super().__init__()\n\n self.embedding = torch.nn.Embedding(voc_size, embedding_size or state_size)\n # with torch.no_grad():\n # self.embedding.weight.uniform_(-0.1, 0.1)\n\n torch.nn.init.xavier_uniform_(self.embedding.weight)\n\n self.shared_layers = all([la is layers[0] for la in layers])\n\n if embedding_size is None:\n self.embedding_adapter = lambda x: x\n else:\n self.embedding_adapter = torch.nn.Linear(embedding_size, state_size)\n\n self.dropout = torch.nn.Dropout(dropout)\n self.layers = torch.nn.ModuleList(layers)\n self.output_adapter = lambda x: x\n self.n_prev_states = n_prev_states\n self.n_prev_states_test = n_prev_states_test or n_prev_states\n self.same_length_eval = same_length_eval\n self.embedding_scale = math.sqrt(state_size)\n self.p_drop_layer = p_drop_layer\n self.use_last_state = use_last_state\n self.same_length = same_length\n self.iter = 0\n self.output_mode = output_mode\n\n assert self.output_mode in {\"normal\", \"sum\", \"geometric\", \"sigmoid\"}\n\n if self.output_mode in {\"geometric\", \"sigmoid\"}:\n self.output_gate = torch.nn.Linear(state_size, 1)\n\n self.adaptive = bool(adaptive_cutoffs)\n\n out_proj_size = (embedding_size or state_size) if tied_embedding else state_size\n if self.adaptive:\n self.output = framework.layers.CustomAdaptiveLogSoftmaxWithLoss(\n out_proj_size, voc_size, adaptive_cutoffs, div_value=1,\n tied_to=self.embedding if tied_embedding else None)\n else:\n self.output = torch.nn.Linear(out_proj_size, voc_size)\n\n if norm_before_output or self.output_mode in {\"sum\", \"sigmoid\"}:\n self.out_norm = torch.nn.LayerNorm(state_size)\n else:\n self.out_norm = lambda x: x\n\n if tied_embedding:\n if not self.adaptive:\n self.output.weight = self.embedding.weight\n if embedding_size is not None:\n self.output_adapter = torch.nn.Linear(state_size, embedding_size)\n\n @staticmethod\n def generate_history_mask(sz: int, device: torch.device) -> torch.Tensor:\n return torch.tril(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=-1)\n\n def gen_output(self, x: torch.Tensor, target: Optional[torch.Tensor]) -> torch.Tensor:\n net = self.out_norm(x)\n net = self.output_adapter(net)\n net = self.dropout(net)\n\n if self.adaptive:\n net = self.output(net.transpose(0, 1), target)\n else:\n net = self.output(net.transpose(0, 1))\n\n return net\n\n def accumulate_output(self, features: List[torch.Tensor]) -> torch.Tensor:\n if self.output_mode == \"sum\":\n return sum(features)\n elif self.output_mode in {\"geometric\", \"sigmoid\"}:\n # Must cast it to float16, otherwise pytorch will crash after a few hundred iterations with an\n # incomprehensible error in the gradient scaler\n gates = torch.sigmoid(torch.cat([self.output_gate(f).float() for f in features], -1))\n if self.output_mode == \"geometric\":\n ngates = torch.cumprod(1.0 - gates, -1)\n scores = torch.cat([gates[..., 0:1], gates[..., 1:] * ngates[..., :-1]], -1)\n else:\n 
scores = gates\n\n if self.iter % 100 == 0 and self.training:\n self.log(\"output_gate_mean\", framework.visualize.plot.Barplot(scores.flatten(end_dim=-2).mean(0)))\n # return sum(f * scores[..., i: i+1] for i, f in enumerate(features))\n f = scores.unsqueeze(-2) @ torch.stack(features, -2)\n return f.squeeze(-2)\n else:\n assert False, \"Invalid output mode\"\n\n def forward(self, x: torch.Tensor, target: Optional[torch.Tensor], state) -> Tuple[torch.Tensor, Any]:\n causality_mask = Transformer.generate_square_subsequent_mask(x.shape[0], x.device)\n\n net = self.dropout(self.embedding(x.T.long()))\n net = self.embedding_adapter(net)\n net = net * self.embedding_scale\n\n new_state = []\n features = [net]\n\n n_prev_states = self.n_prev_states if self.training else self.n_prev_states_test\n\n same_length = self.same_length or ((not self.training) and self.same_length_eval)\n if same_length and state is not None:\n causality_mask = [self.generate_history_mask(x.shape[0], x.device)] + \\\n [torch.zeros_like(causality_mask)] * (len(state[0]) - 1) + [causality_mask]\n causality_mask = torch.cat(causality_mask, -1)\n\n plot_cossim = (self.iter % 100 == 0 and self.training)\n for li, l in enumerate(self.layers):\n if n_prev_states > 0:\n if li == 0:\n # Pos offset should be constant for all layers\n pos_offset = sum(s.shape[1] for s in state[0]) if state is not None else 0\n\n # Concatenate the new state with the previous states\n li_r = 0 if self.use_last_state else li\n s = (state[li_r] + [net]) if state is not None else [net]\n attend_to = torch.cat(s, 1)\n\n if not self.use_last_state:\n s[-1] = s[-1].detach()\n new_state.append(s[-n_prev_states:])\n else:\n pos_offset = None\n attend_to = None\n\n net_o = l(net, mask=AttentionMask(None, causality_mask), attend_to=attend_to,\n pos_offset=pos_offset)\n\n if plot_cossim or self.output_mode != \"normal\":\n features.append(net_o)\n\n with torch.no_grad():\n ndiff = torch.norm(net_o - net, p=2, dim=-1)\n n_in = torch.norm(net, p=2, dim=-1)\n self.log(f\"activation_norm/abs_update_layer_{li}\", ndiff.mean())\n self.log(f\"activation_norm/in_layer_{li}\", n_in.mean())\n self.log(f\"activation_norm/rel_update_layer_{li}\", (ndiff/n_in.clamp(min=torch.finfo(n_in.dtype).eps)).mean())\n\n if self.training and self.p_drop_layer > 0.0:\n net = torch.where(torch.rand_like(net_o[..., 0:1]) < self.p_drop_layer, net, net_o)\n else:\n net = net_o\n\n if self.use_last_state and n_prev_states > 0:\n # If we carry over the last state, save it here\n new_state = [((state[0] if state is not None else []) + [net.detach()])[-n_prev_states:]]\n\n if self.output_mode != \"normal\":\n net = self.accumulate_output(features)\n\n if plot_cossim:\n with torch.no_grad():\n f_sample = [f.view(-1, f.shape[-1])[:1024] for f in features]\n f_sample_all = torch.stack(f_sample, -2)\n scores = framework.utils.cossim(f_sample_all, f_sample_all).mean(0)\n self.log(\"feature_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n if self.output_mode != \"normal\":\n f_sample = [self.accumulate_output(f_sample[:i]) for i in range(1, len(f_sample)+1)]\n f_sample_all = torch.stack(f_sample, -2)\n\n outs = F.softmax(self.gen_output(f_sample_all, target).transpose(0, 1), -1)\n scores = framework.utils.cossim(outs, outs).mean(0)\n self.log(\"out_dist_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n real_out = outs[:, -1]\n for i in range(outs.shape[-2] - 1):\n self.log(f\"out_diff_{i}\", (outs[:, i] - real_out).norm(dim=-1, 
p=1).mean())\n\n del outs\n del features\n\n net = self.gen_output(net, target)\n self.iter += 1\n\n return net, new_state" }, { "identifier": "task", "path": "tasks/task_db.py", "snippet": "def task(name: Optional[str] = None):\n def wrapper(cls):\n n = TASK_PREFIX + (name or camel_to_snake(cls.__name__))\n assert n not in TASKS, f\"Task {n} already exists\"\n TASKS[n] = cls\n return cls\n return wrapper" }, { "identifier": "args", "path": "tasks/task_db.py", "snippet": "def args(fn):\n global ARGS_REGISTERS\n ARGS_REGISTERS.append(fn)\n return fn" }, { "identifier": "RelativeTransformerEncoderLayer", "path": "layers/transformer/relative_transformer.py", "snippet": "class RelativeTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0, test_pos_clamp: Optional[int] = None, drop_expand: bool = True,\n head_projection_size: Optional[int] = None, ln_after_attention: bool = True):\n super().__init__()\n self.ln_after_attention = ln_after_attention\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model)\n\n if ln_after_attention:\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.reset_parameters()\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.self_attn(src, attend_to if attend_to is not None else src, mask, pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n src = self.norm1(src) if self.ln_after_attention else src\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n def reset_parameters(self):\n torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)" }, { "identifier": "PrelnRelativeTransformerEncoderLayer", "path": "layers/transformer/relative_preln_transformer.py", "snippet": "class PrelnRelativeTransformerEncoderLayer(RelativeTransformerEncoderLayer):\n is_preln = True\n\n def __init__(self, d_model, nhead, n_layers: int, dim_feedforward=2048, dropout=0.1,\n activation: ActivationFunction = F.relu, attention_dropout=0, test_pos_clamp: Optional[int] = None,\n drop_expand: bool = True, head_projection_size: Optional[int] = None):\n super().__init__(\n d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward, dropout=dropout,\n activation=activation, attention_dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n drop_expand=drop_expand, head_projection_size=head_projection_size)\n\n reset_prenorm_params(self, n_layers)\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src)\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n 
src = src + self.dropout1(src2)\n src2 = self.norm2(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n return src" }, { "identifier": "PrelnRelativeKVMemTransformerEncoderLayer", "path": "layers/transformer/relative_preln_kvmem_transformer.py", "snippet": "class PrelnRelativeKVMemTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, n_keys: Union[int, Tuple[int, int]], n_layers: int, dim_feedforward=2048,\n dropout=0.1, activation: ActivationFunction = F.relu, attention_dropout=0,\n test_pos_clamp: Optional[int] = None, pkm_heads: int = 1, pkm_stochastic: bool = True,\n pkm_custom_init: int = 0, pkm_slice_values: bool = False,\n pkm_knn: int = 32, linproj: bool = False, head_merge_topk: bool = False, load_balance: bool = True,\n kvmem_dropout: str = \"none\", kvmem_randomize_indices: bool = False, kvmem_query_bias: bool = False,\n standard_parallel: bool = False, approx_topk: bool = False, factorize: bool = False,\n full_key: bool = False, key_redundancy_factor: int = 1, two_stage: bool = False,\n factors: Optional[List[int]] = None, head_exclusive: bool = False,\n head_projection_size: Optional[int] = None):\n super().__init__()\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n\n self.pkm = LowrankApproximate2Layer(\n d_model, n_keys, pkm_heads, stochastic=pkm_stochastic, custom_init=pkm_custom_init,\n weight_scale=math.sqrt(2.0 / n_layers), slice_values=pkm_slice_values, knn=pkm_knn,\n head_merge_topk=head_merge_topk, load_balance=load_balance, dropout=dropout,\n query_proj=linproj, randomize_indices=kvmem_randomize_indices, dropout_mode=kvmem_dropout,\n query_bias=kvmem_query_bias, approx=approx_topk, factorize=factorize, full_key=full_key,\n key_redundancy_factor=key_redundancy_factor, two_stage=two_stage, factors=factors,\n head_exclusive=head_exclusive, activation=activation)\n\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.dropout = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.standard_parallel = standard_parallel\n\n reset_prenorm_params(self, n_layers)\n\n if self.standard_parallel:\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward, bias=False)\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model, bias=False)\n\n initializer = self.pkm.get_custom_init()\n\n s_real = dim_feedforward + self.pkm.size\n # s_real = dim_feedforward + self.pkm.heads * self.pkm.knn\n initializer(self.linear2.weight, std=math.sqrt(2 / (n_layers * s_real)))\n initializer(self.pkm.values.weight, std=math.sqrt(2 / (n_layers * s_real)))\n initializer(self.linear1.weight, std=math.sqrt(2 / (n_layers * d_model)))\n\n if self.pkm.two_stage:\n initializer(self.pkm.full_keys, std=math.sqrt(2 / (n_layers * d_model)))\n\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src)\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n src = src + self.dropout(src2)\n src2 = self.norm2(src)\n src3 = self.pkm(src2)\n\n if self.standard_parallel:\n src3 = src3 + self.linear2(self.dropout(self.activation(self.linear1(src2))))\n\n src = src + self.dropout(src3)\n return src" }, { "identifier": "RelativeMoeTransformerEncoderLayer", "path": 
"layers/transformer/relative_moe_transformer.py", "snippet": "class RelativeMoeTransformerEncoderLayer(LoggingLayer, torch.nn.Module):\n def __init__(self, d_model, nhead, n_experts: int, expert_size: int, n_layers: int, dim_feedforward=2048,\n dropout=0.1, activation: ActivationFunction = F.relu, attention_dropout=0,\n test_pos_clamp: Optional[int] = None, knn: int = 0,\n standard_parallel: bool = False, custom_init: int = 0,\n dropout_mode: str = \"none\", selection_mode: str = \"add\",\n perplexity_reg: float = 0.0, key_mode: str = \"moe\", half_key: bool = False,\n n_heads: int = 1, norm_keys: bool = False, perplexity_reg_mode: str=\"step\",\n n_random: int = 0, reg_type: str = \"normal\", std_correction: bool = False,\n topk_mode: str = \"full\", head_projection_size: Optional[int] = None,\n activation_after_topk: bool = False, weight_grouping: str = \"none\",\n kmeans_distance: str = \"cosine\", drop_parallel: bool = True, block_expert_sel_in_grad: bool = False,\n mlp_selection: bool = False, classification_target: str = \"sum\",\n normalize_expert_sel_init: bool = False, norm_key_init: bool = False, norm_value_init: bool = False,\n norm_standard_parallel_values: bool = False, identical_init: bool = False,\n topological_sel_reg: float = 0.0, topological_expert_reg: float = 0.0,\n gumbel_select_only: bool = False, topk_value_norm_compensation: bool = False,\n norm_expert_scores: bool = False, sel_input_cluster_init: bool = False,\n init_norm_mode: str = \"full\", sel_bias: bool = False,\n bias: bool = False, rescale_normed: bool = False, sel_norm: str = \"none\",\n rescale_grads: bool = False, gumbel_decay: int = 0, preln: bool = True, ln_affine: bool = True,\n sinkhorn_local: bool = False, sinkhorn_n_iters: int = 3, moe_dropout_factor: float = 1.0,\n drop_expert: float = 0.0, expert_size_init: bool = False, sync_distributed: bool = True,\n modulation_amplitude: float = 0.5, invisible_selection: bool = False,\n slope_multiplier: float = 1.0, moe_init_scale: float = 1.0):\n super().__init__()\n self.preln = preln\n self.i = 0\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n\n std_scale = math.sqrt(2.0 / n_layers) if preln else 1.0\n std_scale *= math.sqrt(moe_init_scale)\n\n self.pkm = MoE(\n d_model, n_experts, expert_size, knn=knn, dropout=dropout * moe_dropout_factor, dropout_mode=dropout_mode,\n weight_scale=std_scale, custom_init=custom_init, selection_mode=selection_mode,\n perplexity_reg=perplexity_reg, key_mode=key_mode, half_key=half_key, n_heads=n_heads,\n norm_keys=norm_keys, perplexity_reg_mode=perplexity_reg_mode, n_random=n_random,\n reg_type=reg_type, std_correction=std_correction, topk_mode=topk_mode,\n activation_after_topk=activation_after_topk, weight_grouping=weight_grouping,\n kmeans_distance=kmeans_distance, activation=activation, block_expert_sel_in_grad=block_expert_sel_in_grad,\n mlp_selection=mlp_selection, classification_target=classification_target,\n normalize_expert_sel_init=normalize_expert_sel_init, norm_key_init=norm_key_init,\n norm_value_init=norm_value_init, identical_init=identical_init, topological_sel_reg=topological_sel_reg,\n topological_expert_reg=topological_expert_reg, gumbel_select_only=gumbel_select_only,\n topk_value_norm_compensation=topk_value_norm_compensation, norm_expert_scores=norm_expert_scores,\n sel_input_cluster_init=sel_input_cluster_init,\n n_parallel_expert_channels=dim_feedforward if standard_parallel else 
0,\n init_norm_mode=init_norm_mode, sel_bias=sel_bias, bias=bias, rescale_normed=rescale_normed,\n sel_norm=sel_norm, rescale_grads=rescale_grads, gumbel_decay=gumbel_decay,\n sinkhorn_local=sinkhorn_local, sinkhorn_n_iters=sinkhorn_n_iters, expert_dropout=drop_expert,\n expert_size_init=expert_size_init, sync_distributed=sync_distributed,\n modulation_amplitude=modulation_amplitude, invisible_selection=invisible_selection,\n slope_multiplier=slope_multiplier)\n\n self.norm1 = torch.nn.LayerNorm(d_model, elementwise_affine=ln_affine)\n self.norm2 = torch.nn.LayerNorm(d_model, elementwise_affine=ln_affine)\n self.dropout = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.standard_parallel = standard_parallel\n self.drop_parallel = drop_parallel\n\n if preln:\n reset_prenorm_params(self, n_layers)\n\n if self.standard_parallel:\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward, bias=bias)\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model, bias=False)\n\n s_real = dim_feedforward + self.pkm.size\n # s_real = dim_feedforward + self.pkm.heads * self.pkm.knn\n\n init = self.pkm.get_initializer()\n\n init(self.linear1.weight, std=std_scale * math.sqrt(1.0 / d_model))\n init(self.linear2.weight, std=std_scale * math.sqrt(1.0 / s_real))\n\n if norm_standard_parallel_values:\n with torch.no_grad():\n self.linear2.weight.div_(self.linear2.weight.norm(dim=0, keepdim=True))\n\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n\n src2 = self.norm1(src) if self.preln else src\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n src = src + self.dropout(src2)\n\n if self.preln:\n src2 = self.norm2(src)\n else:\n src = src2 = self.norm1(src)\n\n if self.i == 3:\n with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof:\n src3 = self.pkm(src2)\n prof.export_chrome_trace(\"trace.json\")\n assert False\n else:\n src3 = self.pkm(src2)\n\n # self.i += 1\n\n if self.standard_parallel:\n x = self.linear1(src2)\n with torch.no_grad():\n self.log(\"standard_parallel_relu_pass_rate\", (x > 0).flatten(end_dim=-2).float().mean().item())\n x = self.activation(x)\n if self.drop_parallel:\n x = self.dropout(x)\n src3 = src3 + self.linear2(x)\n\n src = src + self.dropout(src3)\n if not self.preln:\n src = self.norm2(src)\n\n return src" }, { "identifier": "TopkTransformer", "path": "layers/transformer/topk_transformer.py", "snippet": "class TopkTransformer(PrelnRelativeTransformerEncoderLayer, LoggingLayer):\n def __init__(self, d_model, nhead, n_layers: int, dim_feedforward=2048, dropout=0.1,\n activation: ActivationFunction = F.relu, attention_dropout=0,\n test_pos_clamp: Optional[int] = None, drop_expand: bool = True, k: int = 32,\n use_norm: bool = True, head_projection_size: Optional[int] = None):\n\n super().__init__(d_model, nhead, n_layers, dim_feedforward, dropout, activation, attention_dropout,\n test_pos_clamp, drop_expand, head_projection_size=head_projection_size)\n\n LoggingLayer.__init__(self)\n self.k = k\n self.use_norm = use_norm\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src)\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n 
pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n src2 = self.norm2(src)\n\n middle = self.dropout(self.activation(self.linear1(src2)))\n\n with torch.no_grad():\n if self.use_norm:\n norms = self.linear2.weight.norm(dim=0)\n vals = - middle * norms\n else:\n vals = - middle\n mask = vals > vals.kthvalue(self.k, keepdim=True)[0]\n\n self.log(\"relu_pass_rate_before\", (middle > 0).float().mean())\n\n middle = middle.masked_fill(mask, 0)\n\n self.log(\"topk_positive_rate\", (middle > 0).float().sum(-1).mean()/self.k)\n\n src2 = self.linear2(middle)\n src = src + self.dropout2(src2)\n return src" }, { "identifier": "MoE", "path": "layers/moe_layer.py", "snippet": "class MoE(LoggingLayer, RegularizedLayer, OncePerIterLayer, torch.nn.Module):\n def __init__(self, dmodel: int, n_experts: int, expert_size: int, n_heads: int, knn: int = 0,\n dropout: float = 0, weight_scale: float = 1.0, custom_init: int = 0,\n dropout_mode: str = \"none\", selection_mode: str = \"add\", perplexity_reg: float = 0.0,\n key_mode: str = \"moe\", half_key: bool = False, norm_keys: bool = False,\n perplexity_reg_mode: str=\"step\", n_random: int = 0, reg_type: str = \"entropy\",\n std_correction: bool = False, topk_mode: str = \"full\", activation_after_topk: bool = False,\n weight_grouping: str = \"none\", kmeans_distance: str = \"cosine\",\n activation = lambda x: F.relu(x, inplace=True), block_expert_sel_in_grad: bool = False,\n mlp_selection: bool = False, classification_target: str = \"sum\",\n normalize_expert_sel_init: bool = False, norm_key_init: bool = False, norm_value_init: bool = False,\n identical_init: bool = False, topological_sel_reg: float = 0.0, topological_expert_reg: float = 0.0,\n gumbel_select_only: bool = False, topk_value_norm_compensation: bool = False,\n norm_expert_scores: bool = False, sel_input_cluster_init: bool = False,\n n_parallel_expert_channels: int = 0, init_norm_mode: str = \"full\", sel_bias: bool = False,\n bias: bool = False, rescale_normed: bool = False, sel_norm: str = \"none\",\n rescale_grads: bool = False, gumbel_decay: int = 0, v_dim: Optional[int] = None,\n sinkhorn_local: bool = False, sinkhorn_n_iters: int = 3, expert_dropout: float = 0.0,\n expert_size_init: bool = False, sync_distributed: bool = False,\n modulation_amplitude: float = 0.5, invisible_selection: bool = False,\n slope_multiplier: float = 1.0):\n\n super().__init__()\n self.custom_init = custom_init\n self.k_dim = dmodel\n self.v_dim = v_dim if v_dim is not None else dmodel\n self.n_experts = n_experts\n self.expert_size = expert_size\n self.size = self.n_experts * self.expert_size\n self.knn = knn\n self.dropout = dropout\n self.dropout_mode = dropout_mode\n self.selection_mode = selection_mode\n self.perplexity_reg = perplexity_reg\n self.half_key = half_key\n self.key_mode = key_mode\n self.k_vec_dim = self.k_dim // (2 if half_key else 1)\n self.n_heads = n_heads\n self.norm_keys = norm_keys\n self.perplexity_reg_mode = perplexity_reg_mode\n self.n_random = n_random\n self.reg_type = reg_type\n self.topk_mode = topk_mode\n self.activation_after_topk = activation_after_topk\n self.weight_grouping = weight_grouping\n self.kmeans_distance = kmeans_distance\n self.activation = activation\n self.block_expert_sel_in_grad = block_expert_sel_in_grad\n self.mlp_selection = mlp_selection\n self.classification_target = classification_target\n self.weight_scale = weight_scale\n self.normalize_expert_sel_init = normalize_expert_sel_init\n self.norm_key_init = norm_key_init\n self.norm_value_init = 
norm_value_init\n self.identical_init = identical_init\n self.topological_sel_reg = topological_sel_reg\n self.topological_expert_reg = topological_expert_reg\n self.gumbel_select_only = gumbel_select_only\n self.topk_value_norm_compensation = topk_value_norm_compensation\n self.norm_expert_scores = norm_expert_scores\n self.sel_input_cluster_init = sel_input_cluster_init\n self.iter = 0\n self.layer = 0\n self.initalized = False\n self.rescale_normed = rescale_normed\n self.sel_norm = sel_norm\n self.rescale_grads = rescale_grads\n self.gumbel_decay = gumbel_decay\n self.was_training = True\n self.sinkhorn_local = sinkhorn_local\n self.sinkhorn_n_iters = sinkhorn_n_iters\n self.expert_dropout = expert_dropout\n self.reg_counts = 0\n self.sync_distributed = sync_distributed and torch.distributed.is_initialized()\n self.modulation_amplitude = modulation_amplitude\n self.invisible_selection = invisible_selection\n self.slope_multiplier = slope_multiplier\n\n self.coocurence = None\n\n assert self.selection_mode in {\"add\", \"gate\", \"sigmoid\", \"gumbel\", \"hard_gumbel\", \"gumbel_sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"sinkhorn_local\", \"mul\", \"random\", \"sinkmoid2\", \"sinkmax2\", \"modulate\"}\n assert self.perplexity_reg_mode in {\"step\", \"global\", \"time\", \"global_time\"}\n assert self.dropout_mode in {\"none\", \"score\"}\n assert self.reg_type in {\"perplexity\", \"variance\", \"entropy\", \"l2\", \"switch\"}\n assert self.topk_mode in {\"full\", \"l1_approx\", \"approx\"}\n assert self.weight_grouping in {\"none\", \"keys_only\", \"keys_and_experts\"}\n assert self.classification_target in {\"sum\", \"max\"}\n assert self.sel_norm in {\"none\", \"cos\", \"input\", \"weights\"}\n\n if selection_mode in {\"mul\"} and activation_after_topk:\n raise ValueError(\"Activation after topk is not supported with mul selection\")\n\n if self.sel_norm != \"none\" and mlp_selection:\n raise ValueError(\"normalization not supported with mlp_selection\")\n\n if std_correction and self.selection_mode in {\"add\"}:\n if key_mode == \"both\":\n self.key_std_correction = math.sqrt(3)\n else:\n self.key_std_correction = math.sqrt(2)\n elif std_correction and self.selection_mode in {\"sigmoid\", \"sinkmoid\", \"sinkmoid2\"}:\n self.key_std_correction = 2.0\n else:\n self.key_std_correction = 1.0\n\n if self.key_mode in {\"moe\", \"both\"}:\n self.keys = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim, self.expert_size))\n self.get_initializer()(self.keys, std=dmodel ** -0.5 * weight_scale * self.key_std_correction)\n else:\n self.keys = None\n\n if bias:\n self.bias = torch.nn.Parameter(torch.zeros(self.n_experts, self.expert_size))\n self.o_bias = torch.nn.Parameter(torch.zeros(self.v_dim))\n else:\n self.bias = None\n self.o_bias = None\n\n if self.key_mode in {\"shared\", \"both\"}:\n self.shared_keys = torch.nn.Parameter(torch.empty(self.k_vec_dim, self.expert_size))\n self.get_initializer()(self.shared_keys, std=dmodel ** -0.5 * weight_scale * self.key_std_correction)\n else:\n self.shared_keys = None\n\n self.values = torch.nn.Parameter(torch.empty(self.n_experts, self.expert_size, self.v_dim))\n\n if self.mlp_selection:\n self.sel = torch.nn.Sequential(\n torch.nn.Linear(self.k_vec_dim, dmodel),\n torch.nn.ReLU(),\n torch.nn.Linear(dmodel, self.n_experts, bias=bias)\n )\n self.get_initializer()(self.sel[0].weight, std=self.k_vec_dim ** -0.5 * weight_scale * self.key_std_correction)\n self.get_initializer()(self.sel[-1].weight, std=dmodel ** 
-0.5 * weight_scale * self.key_std_correction)\n self.expert_sel = None\n else:\n self.sel = lambda x: F.linear(x, self.expert_sel, self.sel_bias)\n self.expert_sel = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim))\n self.sel_bias = torch.nn.Parameter(torch.zeros(self.n_experts)) if sel_bias else None\n\n self.get_initializer()(self.expert_sel, std=self.k_vec_dim ** -0.5 * weight_scale)\n\n if init_norm_mode == \"full\":\n real_size = self.size\n elif init_norm_mode == \"selected_experts\":\n real_size = self.expert_size * self.n_heads\n elif init_norm_mode == \"selected_channels\":\n real_size = self.knn\n elif init_norm_mode == \"expert_size\":\n real_size = self.expert_size\n else:\n raise ValueError(\"Unknown init_norm_mode\")\n\n real_size += n_parallel_expert_channels\n\n if expert_size_init:\n real_size = self.expert_size\n\n self.get_initializer()(self.values, std=real_size ** -0.5 * weight_scale)\n self.sel_hist = []\n self.index_sel_counts = 0\n self.index_sel_norm = 0\n\n self.index_sel_counts_100 = 0\n self.index_sel_norm_100 = 0\n\n self.sel_count_log = None\n\n self.register_buffer(\"kv_sel_counts\", torch.zeros(self.n_experts, self.expert_size), persistent=False)\n self.register_buffer(\"kv_sel_counts_100\", torch.zeros_like(self.kv_sel_counts))\n\n if self.rescale_normed and self.sel_norm != \"none\":\n self.sel_scale = torch.nn.Parameter(torch.ones([1]))\n else:\n self.sel_scale = 1.0\n\n if self.norm_expert_scores:\n self.expert_scale = torch.nn.Parameter(torch.full([1], math.sqrt(expert_size)))\n\n self.register_buffer(\"seq\", torch.arange(max(self.knn, self.n_heads, self.n_experts, self.k_dim, self.v_dim), dtype=torch.long), persistent=False)\n self.regroup_weights()\n\n def keys_to_logical_order(self, keys: torch.Tensor) -> torch.Tensor:\n k = keys.view(self.n_experts, self.k_vec_dim, self.expert_size)\n return k.permute(0, 2, 1).contiguous().view(-1, self.k_vec_dim)\n\n def keys_from_logical_order(self, keys: torch.Tensor) -> torch.Tensor:\n return keys.view(self.n_experts, self.expert_size, self.k_vec_dim).permute(0, 2, 1).contiguous().view(self.n_experts * self.k_vec_dim, self.expert_size)\n\n def init_sel(self, x: torch.Tensor):\n if not self.sel_input_cluster_init:\n return\n\n with torch.no_grad():\n from kmeans_pytorch import kmeans\n _, cluster_centers = kmeans(\n X=x, num_clusters=self.n_experts, distance=self.kmeans_distance, device=torch.device('cuda')\n )\n\n self.expert_sel.set_(cluster_centers.to(self.expert_sel.device).contiguous())\n if self.normalize_expert_sel_init:\n self.renorm_keep_std(self.expert_sel, dim=1)\n\n def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):\n with torch.no_grad():\n std = weight.std()\n weight.div_(weight.norm(dim=dim, keepdim=True))\n weight.mul_(std / weight.std())\n\n def regroup_weights(self) -> Optional[torch.Tensor]:\n with torch.no_grad():\n\n if self.norm_key_init:\n self.renorm_keep_std(self.keys.view(self.n_experts, self.k_vec_dim, self.expert_size), dim=1)\n\n if self.norm_value_init:\n self.renorm_keep_std(self.values, dim=1)\n\n if self.identical_init:\n k = self.keys.view(self.n_experts, self.k_vec_dim, self.expert_size)\n self.keys.set_(k[:1].expand_as(k).reshape_as(self.keys))\n\n v = self.values.view(self.n_experts, self.expert_size, self.v_dim)\n self.values.set_(v[:1].expand_as(v).reshape_as(self.values))\n\n ids = None\n if self.weight_grouping != \"none\":\n # self.n_experts * self.k_vec_dim, self.expert_size\n k = self.keys_to_logical_order(self.keys)\n\n from kmeans_pytorch 
import kmeans\n cluster_ids_x, cluster_centers = kmeans(\n X=k, num_clusters=self.n_experts, distance=self.kmeans_distance, device=torch.device('cuda')\n )\n\n _, ids = cluster_ids_x.sort()\n k = self.keys_from_logical_order(k[ids])\n\n self.keys.set_(k.contiguous())\n self.values.set_(self.values[ids].contiguous())\n if self.weight_grouping == \"keys_and_experts\":\n self.expert_sel.set_(cluster_centers.contiguous().to(self.expert_sel.device))\n else:\n self.get_initializer()(self.expert_sel, std=self.k_vec_dim ** -0.5 * self.weight_scale)\n\n if self.normalize_expert_sel_init:\n self.renorm_keep_std(self.expert_sel, dim=1)\n\n return ids\n\n def patch_optimizer_state(self, optimizer: torch.optim.AdamW, ids: torch.Tensor):\n if self.weight_grouping == \"none\":\n return\n\n with torch.no_grad():\n ks = optimizer.state[self.keys]\n vs = optimizer.state[self.values]\n\n for p in {\"exp_avg\", \"exp_avg_sq\"}:\n k = self.keys_to_logical_order(ks[p])\n ks[p].set_(self.keys_from_logical_order(k[ids]))\n\n vs[p].set_(vs[p][ids])\n\n es = optimizer.state[self.expert_sel]\n for p in {\"exp_avg\", \"exp_avg_sq\", 'step'}:\n es[p].zero_()\n\n def get_initializer(self):\n return torch.nn.init.normal_ if self.custom_init in {0} else utils.init.trunc_normal_\n\n def sparse_matmul(self, indices: torch.Tensor, values: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:\n return F.embedding_bag(indices, weight.type_as(values), per_sample_weights=values, mode=\"sum\", sparse=False)\n\n # def sparse_matmul(self, indices: torch.Tensor, values: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:\n # sin = torch.sparse_csr_tensor(\n # crow_indices=torch.arange(0, values.nelement() + 1, values.shape[-1], device=indices.device),\n # col_indices=indices.flatten(),\n # values=values.flatten(),\n # size=(values.shape[0], weight.shape[0])\n # )\n # return sin @ weight.type_as(values)\n\n def pre_train_forward(self):\n if self.norm_keys:\n with torch.no_grad():\n self.keys.div_(self.keys.norm(dim=-1, keepdim=True))\n\n if self.topk_value_norm_compensation:\n with torch.no_grad():\n self.value_norms = self.values.norm(2, dim=-1)\n\n def topoloss(self, x: torch.Tensor) -> torch.Tensor:\n return (F.mse_loss(x[1:], x[:-1], reduction='mean') +\n F.mse_loss(x[1:], x[:-1], reduction='mean'))\n\n def ani(self, x: torch.Tensor) -> torch.Tensor:\n assert x.ndim == 2\n chunk_size = 32\n\n xnorm = F.normalize(x, 2, dim=-1)\n\n accu = 0\n for i in range(0, x.shape[0], chunk_size):\n a = xnorm[i: i + chunk_size]\n sims = xnorm @ a.T\n sims[i : i + chunk_size].fill_diagonal_(0)\n accu += sims.sum()\n\n return accu / (x.shape[0] * (x.shape[0] - 1))\n\n def log_expert_sel_usage(self, prefix: str, channel_sel_counts: torch.Tensor):\n sel_nonzero = (channel_sel_counts != 0).type(torch.float).sum(axis=-1) / self.expert_size\n self.log(f\"{prefix}/mean\", sel_nonzero.mean())\n self.log(f\"{prefix}/min\", sel_nonzero.min())\n self.log(f\"{prefix}/max\", sel_nonzero.max())\n\n\n def post_train_forward(self):\n if self.training and self.rescale_grads:\n self.values.grad.view(self.n_experts, -1).mul_(self.rescale[:, None])\n self.keys.grad.view(self.n_experts, -1).mul_(self.rescale[:, None])\n self.expert_sel.grad.mul_(self.rescale[:, None])\n\n def pre_train_forward(self):\n if self.training and not self.was_training:\n sorted_counts = self.index_sel_counts.sort(descending=True).values\n self.log(\"test_exert_channel_usage\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n 
self.layer = 0\n if self.sel_hist:\n self.sel_hist = []\n self.index_sel_counts = 0\n self.index_sel_norm = 0\n self.reg_counts = 0\n\n def before_loss(self):\n if self.sel_hist:\n # Concatenate against time dimension. Important for the within-batch regularization\n sel = torch.cat(self.sel_hist, -2)\n self.add_perplexity_reg(sel)\n\n self.sel_hist = []\n\n if self.topological_sel_reg > 0:\n self.add_reg(lambda: self.topological_sel_reg * self.topoloss(self.expert_sel))\n\n if self.topological_expert_reg > 0:\n self.add_reg(lambda: self.topological_expert_reg * (\n self.topoloss(self.keys.view(self.n_experts, -1)) +\n self.topoloss(self.values.view(self.n_experts, -1))\n ))\n\n if self.rescale_grads:\n self.rescale = 1.0 / self.index_sel_counts.clamp(min=1)\n\n # json.dumps\n\n\n if self.index_sel_norm > 0:\n if self.training:\n with torch.no_grad():\n self.log(\"usag_rel_perplexity_all_layers\", utils.relative_perplexity(self.index_sel_counts / self.index_sel_norm))\n self.log(\"dead_expert_proportion_all_layers\", (self.index_sel_counts == 0).float().sum() / self.n_experts)\n\n self.log_expert_sel_usage(\"exert_channel_usage\", self.kv_sel_counts)\n\n self.kv_sel_counts_100.add_(self.kv_sel_counts)\n self.kv_sel_counts.zero_()\n\n self.index_sel_counts_100 = self.index_sel_counts_100 + self.index_sel_counts\n self.index_sel_norm_100 = self.index_sel_norm_100 + self.index_sel_norm\n\n if self.training and self.iter % 100 == 0:\n norm_cnt = self.index_sel_counts_100 / self.index_sel_norm_100\n self.log(\"usag_rel_perplexity_100\", utils.relative_perplexity(norm_cnt))\n self.log(\"dead_expert_proportion_100\", (self.index_sel_counts_100 == 0).float().sum() / self.n_experts)\n\n sorted_counts = self.index_sel_counts_100.sort(descending=True).values\n self.log(\"usage_counts_100\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n\n self.log_expert_sel_usage(\"exert_channel_usage_100\", self.kv_sel_counts_100)\n self.kv_sel_counts_100.zero_()\n\n self.index_sel_counts_100 = 0\n self.index_sel_norm_100 = 0\n\n self.log(\"ani/keys\", self.ani(self.keys_to_logical_order(self.keys)))\n self.log(\"ani/values\", self.ani(self.values.flatten(0, -2)))\n self.log(\"ani/expert_sel\", self.ani(self.expert_sel.T))\n\n if self.training:\n self.iter += 1\n\n def topk(self, x: torch.Tensor, k: int, approx: bool) -> Tuple[torch.Tensor, torch.Tensor]:\n if approx:\n x = x.view(*x.shape[:-1], k, -1)\n scores, ind = x.max(-1)\n return scores, self.seq[:k] * x.shape[-1] + ind\n else:\n return x.topk(k, dim=-1, sorted=False)\n\n def add_perplexity_reg(self, sel: torch.Tensor):\n sync_distributed = self.sync_distributed and (self.perplexity_reg_mode not in {\"time\", \"global_time\"})\n\n def log_mean(x: torch.Tensor, dim: int = 0):\n if sync_distributed:\n xlse = framework.utils.distributed_ops.logsumexp(x, dim=dim)\n\n # Normalize\n n = torch.tensor(x.shape[dim]).to(x.device)\n torch.distributed.all_reduce(n, op=torch.distributed.ReduceOp.SUM)\n return xlse - n.log()\n else:\n return x.logsumexp(dim) - math.log(x.shape[dim])\n\n if self.perplexity_reg_mode in {\"time\", \"global_time\"}:\n sel = sel.flatten(0, -3)\n else:\n sel = sel.flatten(0, -2)\n\n # Note: sel are raw logits, no matter what activation is used\n if self.perplexity_reg > 0:\n if self.reg_type == \"perplexity\":\n sel_d = F.log_softmax(sel, dim=-1)\n sel_d = log_mean(sel_d, -2)\n loss = lambda: self.perplexity_reg * ( - utils.relative_perplexity_l(sel_d).mean())\n elif self.reg_type 
== \"entropy\":\n sel_d = F.log_softmax(sel, dim=-1)\n sel_d = log_mean(sel_d, -2)\n loss = lambda: self.perplexity_reg * ( - utils.entropy_l(sel_d).mean())\n elif self.reg_type == \"variance\":\n if sync_distributed:\n raise NotImplementedError(\"Variance regularization is not supported in distributed mode\")\n avg_sel = sel.mean(-2)\n loss = lambda: self.perplexity_reg * avg_sel.var(-1).mean()\n elif self.reg_type == \"l2\":\n loss = lambda: self.perplexity_reg * sel.pow(2).mean()\n elif self.reg_type == \"switch\":\n if sync_distributed:\n torch.distributed.all_reduce(self.reg_counts, op=torch.distributed.ReduceOp.SUM)\n\n p_sel_real = self.reg_counts / self.reg_counts.sum(-1, keepdims=True)\n if self.perplexity_reg_mode in {\"time\", \"global_time\"}:\n p_sel_real = p_sel_real.unsqueeze(-2)\n\n loss = lambda: self.perplexity_reg * (F.softmax(sel, dim=-1) * p_sel_real).mean()\n self.reg_counts = 0\n else:\n assert False\n\n self.add_reg(loss, \"moe\")\n\n def compute_scores(self, input: torch.Tensor, index: CVMMSel, expert_scores: torch.Tensor, shared_score: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n if self.keys is not None:\n # scores = self.sparse_matmul(\n # (self.seq[:input.shape[-1]] + index[:, None] * (self.k_dim // (2 if self.half_key else 1))),\n # input,\n # self.keys\n # )\n scores = cvmm(input, index, self.keys)\n if self.shared_keys is not None:\n scores = scores + shared_score\n else:\n scores = shared_score\n\n if self.bias is not None:\n scores = scores + self.bias[index.raw_sel]\n\n if self.invisible_selection:\n unmodulated_scores = scores\n scores = scores.detach()\n\n if self.selection_mode in {\"add\"}:\n with torch.no_grad():\n self.log(\"expert_key_positive_rate\", (scores > 0).type_as(scores).mean())\n scores = scores + expert_scores[..., None]\n elif self.selection_mode in {\"mul\"}:\n scores = scores * expert_scores[..., None]\n elif self.selection_mode in {\"gate\", \"sigmoid\", \"gumbel\", \"gumbel_sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"random\", \"modulate\", \"sinkmoid2\"}:\n # Handle it later\n pass\n elif self.selection_mode == \"hard_gumbel\":\n s = (torch.ones_like(expert_scores) - expert_scores).detach() + expert_scores\n scores = scores * s[..., None]\n\n if self.invisible_selection and scores is not unmodulated_scores:\n scores = unmodulated_scores + scores - scores.detach()\n\n scores = self.activation(scores)\n\n if self.norm_expert_scores:\n scores = F.normalize(scores, 1, dim=-1) * self.expert_scale\n\n if self.selection_mode in {\"gate\", \"sigmoid\", \"gumbel\", \"gumbel_sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"modulate\", \"sinkmoid2\"}:\n if self.invisible_selection:\n unmodulated_scores = scores\n scores = scores.detach()\n scores = scores * expert_scores[..., None]\n if self.invisible_selection:\n scores = unmodulated_scores + scores - scores.detach()\n\n if self.train and self.iter % 10 == 0:\n with torch.no_grad():\n gt0 = (scores > 0).float()\n gt0_s = gt0.sum()\n if self.selection_mode in {\"add\"}:\n self.log(\"k1_vs_k2_magnitude\", (scores / expert_scores[..., None]).sum() / gt0_s - 1)\n\n self.log(\"relu_pass_rate\", gt0_s / scores.numel())\n\n self.kv_sel_counts.index_add_(0, index.raw_sel.flatten(), gt0.flatten(end_dim=-2))\n\n\n # elif self.selection_mode in {\"predict_rank\"}:\n # self.add_reg(lambda: self.rank_loss(expert_scores, scores.detach().sum(-1)))\n\n if self.dropout > 0 and self.dropout_mode != \"none\":\n scores = F.dropout(scores, 
self.dropout, training=self.training)\n\n # indices = torch.arange(0, scores.shape[-1], device=input.device) + index[:, None] * self.expert_size\n return scores\n\n def sel_activation(self, sel: torch.Tensor, seq_len: int) -> Tuple[torch.Tensor, torch.Tensor]:\n reg_sel = sel\n if self.selection_mode in {\"gumbel\", \"hard_gumbel\"}:\n if self.training:\n sel = F.gumbel_softmax(sel)\n else:\n sel = F.softmax(sel)\n elif self.selection_mode == \"gumbel_sigmoid\":\n if self.training and (self.gumbel_decay == 0 or self.gumbel_decay > self.iter):\n noise = gumbel_sigmoid_noise(sel)\n if self.gumbel_decay:\n noise = noise * (1 - self.iter / self.gumbel_decay)\n sel = sel + noise\n else:\n sel = F.sigmoid(sel)\n elif self.selection_mode in {\"sinkhorn\", \"sinkmoid\", \"sinkmax\"}:\n if self.training:\n if self.sinkhorn_local:\n sel = sel.view(-1, seq_len, sel.shape[-1])\n\n for _ in range(self.sinkhorn_n_iters):\n if self.sinkhorn_local or (not self.sync_distributed):\n sel = sel - torch.logsumexp(sel, -2, keepdim=True)\n else:\n sel = sel - framework.utils.distributed_ops.logsumexp(sel, -2, keepdim=True)\n\n sel = sel - torch.logsumexp(sel, -1, keepdim=True)\n reg_sel = sel\n\n if self.sinkhorn_local:\n sel = sel.flatten(end_dim=-2).exp()\n\n sel = sel.exp()\n elif self.selection_mode == \"sinkmoid\":\n sel = F.sigmoid(sel)\n else:\n sel = F.softmax(sel, dim=-1)\n elif self.selection_mode in {\"sinkhorn2\", \"sinkmoid2\", \"sinkmax2\"}:\n if self.training:\n sel = self.sinkhorn(sel, self.selection_mode != \"sinkmoid2\")\n elif self.selection_mode == \"sinkmoid\":\n sel = F.sigmoid(sel)\n else:\n sel = F.softmax(sel, dim=-1)\n elif self.selection_mode in {\"sigmoid\"}:\n sel = torch.sigmoid(sel)\n elif self.selection_mode in {\"modulate\"}:\n sel = torch.tanh(sel) * (self.modulation_amplitude / 0.5) + 1\n elif self.selection_mode in {\"add\"}:\n sel = sel\n elif self.selection_mode in {\"mul\"}:\n sel = sel.abs()\n reg_sel = sel\n elif self.selection_mode in {\"gate\"}:\n sel = F.softmax(sel, dim=-1)\n with torch.no_grad():\n self.log(\"expert_rel_perplexity_per_selection\", utils.relative_perplexity(sel).mean())\n else:\n assert False\n\n return sel, reg_sel\n\n def sinkhorn(self, x: torch.Tensor, normalize:bool = True) -> torch.Tensor:\n # Based on\n A, B = x.shape[-2:]\n\n a = torch.zeros_like(x[..., 0, :])\n b = torch.zeros_like(x[..., 0])\n\n for _ in range(self.sinkhorn_n_iters):\n b = math.log(A) - (x - a[..., None, :]).logsumexp(-1)\n if self.sync_distributed:\n a = math.log(B) - framework.utils.distributed_ops.logsumexp(x - b[..., None], -2)\n else:\n a = math.log(B) - (x - b[..., None]).logsumexp(-2)\n\n r = (a[..., None, :] + b[..., None] + x).exp()\n\n if normalize and self.sync_distributed:\n A = torch.tensor(A, device=x.device)\n A = torch.distributed.reduce_all(A, op=torch.distributed.ReduceOp.SUM)\n A = A.item()\n return (r / (A * B)) if normalize else r\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n if not self.initalized:\n self.init_sel(input)\n self.initalized = True\n\n out = 0\n\n if self.half_key:\n in1 = input[..., :self.k_dim // 2]\n in2 = input[..., self.k_dim // 2:]\n else:\n in1 = in2 = input\n\n if self.selection_mode != \"random\":\n if self.block_expert_sel_in_grad:\n in1 = in1.detach()\n\n sel = self.sel(in1) * self.slope_multiplier\n\n if self.sel_norm == \"cos\":\n sel = sel / (in1.norm(dim=-1, keepdim=True) * self.expert_sel.norm(dim=-1)[None]) * self.sel_scale\n elif self.sel_norm == \"weights\":\n sel = sel * (self.sel_scale / 
self.expert_sel.norm(dim=-1)[None])\n elif self.sel_norm == \"input\":\n sel = sel * (self.sel_scale / in1.norm(dim=-1, keepdim=True))\n\n sel_raw = reg_sel = sel\n\n inv_val = float(\"-inf\")\n\n if (not self.activation_after_topk) or self.selection_mode in {\"sinkhorn\", \"sinkhorn2\", \"gumbel\", \"hard_gumbel\", \"gumbel_sigmoid\", \"sinkmoid\", \"sinkmax\", \"mul\", \"sinkmoid2\"}:\n # Sinkhorn should be always applied before top-k\n sel, reg_sel = self.sel_activation(sel, input.shape[-2])\n if self.selection_mode not in {\"sinkmoid\", \"sinkmoid2\"}:\n inv_val = 0\n\n if self.training and self.expert_dropout > 0:\n if self.selection_mode not in {\"sigmoid\", \"modulate\", \"gate\", \"sinkmoid\", \"sinkmoid2\"}:\n raise ValueError(\"Expert dropout not supported in this mode\")\n\n mask = torch.rand_like(sel) < self.expert_dropout\n sel2 = sel.masked_fill(mask, inv_val)\n else:\n sel2 = sel\n\n sel_val, sel_index = self.topk(sel2, self.n_heads, self.topk_mode in {\"l1_approx\", \"approx\"})\n\n if self.activation_after_topk or (self.selection_mode in {\"sinkmoid\", \"sinkmax\", \"mul\", \"sinkmoid2\"}) or (self.gumbel_select_only and self.selection_mode in {\"gumbel\", \"hard_gumbel\", \"gumbel_sigmoid\", \"gumbel_sigmoid\", \"sinkmax\"}):\n sel_val = torch.gather(sel_raw, -1, sel_index)\n if self.selection_mode in {\"gumbel_sigmoid\", \"sinkmoid\", \"sinkmoid2\"}:\n sel_val = torch.sigmoid(sel_val)\n elif self.selection_mode in {\"sinkhorn\", \"sinkhorn2\"}:\n # In case of sinkhorn, simulate the effect of post-topk activation by renormalizing\n sel_val = F.normalize(sel_val, p=1, dim=-1)\n else:\n sel_val, reg_sel = self.sel_activation(sel_val, input.shape[-2])\n else:\n sel_index = torch.randint(0, self.n_experts, (*input.shape[:-1], self.n_heads), device=input.device)\n sel_val = torch.ones_like(sel_index, dtype=input.dtype, device=input.device)\n reg_sel = None\n\n\n record_counts_now = (self.training and self.iter % 10 == 0) or (not self.training)\n\n if not self.training:\n sel_index_flat = sel_index.flatten(end_dim=-2)\n if self.coocurence is None:\n self.coocurence = torch.zeros([self.n_experts, self.n_experts], device=sel_index_flat.device, dtype=torch.long)\n\n for h1 in range(self.n_heads):\n for h2 in range(self.n_heads):\n ind_flat = sel_index_flat[..., h1] * self.n_experts + sel_index_flat[..., h2]\n values = torch.tensor([1], device=self.coocurence.device, dtype=self.coocurence.dtype).expand_as(ind_flat)\n # values = sel_val[..., h2].flatten()\n self.coocurence.flatten().put_(ind_flat, values, accumulate=True)\n # self.coocurence[sel_index_flat[..., h1], sel_index_flat[..., h2]] += 1\n\n if record_counts_now or self.reg_type == \"switch\":\n reg_counts = F.one_hot(sel_index, self.n_experts).type_as(input)\n\n if self.reg_type == \"switch\":\n reg_counts2 = reg_counts.view(*input.shape[:-2], input.shape[-2] * self.n_heads, self.n_experts)\n if self.perplexity_reg_mode == \"time\":\n reg_counts2 = reg_counts2.sum(-2)\n else:\n reg_counts2 = reg_counts2.flatten(end_dim=-2).sum(0)\n\n self.reg_counts = self.reg_counts + reg_counts2\n\n if record_counts_now:\n with torch.no_grad():\n sel_counts = reg_counts.flatten(end_dim=-2).sum(0)\n cnt = sel_index.nelement()\n\n p_expert_sel = sel_counts / cnt\n\n self.index_sel_counts = self.index_sel_counts + sel_counts\n self.index_sel_norm = self.index_sel_norm + cnt\n\n if self.training:\n self.log(\"min_sel_score\", sel_val.min(dim=-1).values.mean())\n self.log(\"max_sel_score\", sel_val.max(dim=-1).values.mean())\n\n sel_oh = 
F.one_hot(sel_index, self.n_experts).sum(-2).bool()\n if self.layer >= 1 and self.training:\n self.log(f\"layer_sel_overlap_{self.layer}\", ((self.prev_sel_oh & sel_oh).sum(-1).float() / self.n_heads).mean())\n\n self.prev_sel_oh = sel_oh\n\n ppl = utils.relative_perplexity(p_expert_sel)\n self.log(\"usage_rel_perplexity\", ppl)\n self.log(\"dead_expert_proportion\", (p_expert_sel == 0).float().sum() / self.n_experts)\n\n if self.perplexity_reg_mode in {\"step\", \"time\"}:\n self.add_perplexity_reg(reg_sel)\n elif self.perplexity_reg > 0 and self.training:\n self.sel_hist.append(reg_sel)\n\n shared_score = (in2 @ self.shared_keys) if self.shared_keys is not None else None\n\n scores_l = []\n\n sel_indices = [cvmm_prepare_sel(sel_index[..., h].int(), self.n_experts) for h in range(sel_index.shape[-1])]\n\n for h in range(sel_index.shape[-1]):\n hi = sel_indices[h]\n\n scores = self.compute_scores(in2, hi, sel_val[..., h], shared_score)\n scores_l.append(scores)\n\n if self.knn > 0 or self.selection_mode == \"classify\":\n with torch.no_grad():\n scores = torch.cat(scores_l, -1)\n\n if self.knn > 0:\n with torch.no_grad():\n tresh = scores.kthvalue(scores.shape[-1] - self.knn, -1).values\n\n scores_l = [s.masked_fill_(s < tresh[:, None], 0) for s in scores_l]\n\n out = 0\n for (hi, scores) in zip(sel_indices, scores_l):\n out = out + cvmm(scores, hi, self.values)\n\n # indices = torch.cat(ind_l, dim=-1)\n # scores = torch.cat(scores_l, dim=-1)\n\n if self.selection_mode == \"classify\":\n self.add_reg(lambda: self.cls_loss(sel_val, scores))\n\n # if self.knn > 0:\n # if self.topk_value_norm_compensation:\n # norms = self.value_norms[None].expand(indices.shape[0], -1).gather(-1, indices)\n # scores2 = scores * norms\n # _, ind2 = self.topk(scores2, self.knn, self.topk_mode == \"approx\")\n # indices = indices.gather(-1, ind2)\n # scores = scores.gather(-1, ind2)\n # else:\n # scores, ind2 = self.topk(scores, self.knn, self.topk_mode == \"approx\")\n # indices = indices.gather(-1, ind2)\n\n # if self.n_random > 0 and self.selection_mode not in {\"predict\", \"classify\"}:\n # with torch.no_grad():\n # rind = torch.arange(0, self.n_experts, device=input.device)\n # rind = torch.masked_select(rind, ~F.one_hot(sel_index, self.n_experts).sum(-2).bool()).view(in_flat.shape[0],-1)\n # rind = rind.gather(-1, torch.randint(0, rind.shape[-1], size=[*rind.shape[:-1], self.n_random], device=rind.device))\n\n # ind_l = [indices]\n # scores_l = [scores]\n # for i in range(self.n_random):\n # hi = rind[..., i]\n # indices, scores = self.compute_scores(in2, hi, sel.gather(-1, hi[:, None]).squeeze(), shared_score)\n\n # ind_l.append(indices)\n # scores_l.append(scores)\n\n # indices = torch.cat(ind_l, dim=-1)\n # scores = torch.cat(scores_l, dim=-1)\n\n # out = self.sparse_matmul(indices, scores, self.values)\n\n self.layer += 1\n\n self.was_training = self.training\n res = out.view(*input.shape[:-1], self.v_dim)\n if self.o_bias is not None:\n res = res + self.o_bias\n return res\n\n def dump_logs(self, save_dir: str):\n if self.coocurence is not None:\n os.makedirs(save_dir, exist_ok=True)\n torch.save(self.coocurence, os.path.join(save_dir, \"coocurence.pt\"))\n\n def get_logs(self) -> Dict[str, Any]:\n res = super().get_logs()\n\n if self.coocurence is not None:\n coo = self.coocurence / self.coocurence.diagonal().clamp(min=1)[:, None]\n res[\"expert_coocurence\"] = framework.visualize.plot.Heatmap(coo, xlabel=\"expert\", ylabel=\"expert\", textval=False)\n self.coocurence = None\n return res" }, { 
"identifier": "Result", "path": "interfaces/result.py", "snippet": "class Result:\n outputs: torch.Tensor\n loss: torch.Tensor\n\n batch_dim = 0\n\n def plot(self) -> Dict[str, Any]:\n return {}\n\n @property\n def batch_size(self) -> int:\n return self.outputs.shape[self.batch_dim]\n\n @staticmethod\n def merge(l: List, batch_weights: Optional[List[float]] = None):\n if len(l) == 1:\n return l[0]\n batch_weights = batch_weights if batch_weights is not None else [1] * len(l)\n loss = sum([r.loss * w for r, w in zip(l, batch_weights)]) / sum(batch_weights)\n out = torch.stack([r.outputs for r in l], l[0].batch_dim)\n return l[0].__class__(out, loss)" } ]
import framework
import torch
import torch.nn
import torch.nn.functional as F
import torch.utils.data
import math
from typing import List, Tuple, Dict, Any
from models import TransformerLanguageModel
from ... import task, args
from layers.transformer import RelativeTransformerEncoderLayer, PrelnRelativeTransformerEncoderLayer
from layers.transformer.relative_preln_kvmem_transformer import PrelnRelativeKVMemTransformerEncoderLayer
from layers.transformer.relative_moe_transformer import RelativeMoeTransformerEncoderLayer
from layers.transformer.topk_transformer import TopkTransformer
from layers.moe_layer import MoE
from interfaces import Result
18630
parser.add_argument("-pkm.sample_smallest", default=False) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="add", choice=["add", "gate", "sigmoid", "gumbel", "hard_gumbel", "predict", "predict_mlp", "classify", "gumbel_sigmoid", "sinkhorn", "sinkhorn2", "sinkmoid", "sinkmax", "moe", "mul", "random", "sinkmoid2", "sinkmax2", "modulate"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.key_mode", default="moe", choice=["moe", "both", "shared"]) parser.add_argument("-moe.half_key", default=False) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.kmeans_distance", default='cosine', choice=['cosine', 'euclidean']) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.std_correction", default=False) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.weight_grouping", default="none", choice=["none", "keys_only", "keys_and_experts"]) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.mlp_selection", default=False) parser.add_argument("-moe.block_expert_sel_in_grad", default=False) parser.add_argument("-moe.classification_target", default="sum", choice=["sum", "max"]) parser.add_argument("-moe.recluster_steps", default="", parser=parser.int_list_parser) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-moe.norm_standard_parallel_values", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.topological_sel_reg", default=0.0) parser.add_argument("-moe.topological_expert_reg", default=0.0) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.gumbel_select_only", default=False) parser.add_argument("-moe.topk_value_norm_compensation", default=False) parser.add_argument("-moe.norm_expert_scores", default=False) parser.add_argument("-moe.sel_input_cluster_init", default=False) parser.add_argument("-moe.init_norm_mode", default="full") parser.add_argument("-moe.bias", default=False) parser.add_argument("-moe.sel_bias", default=False) parser.add_argument("-moe.rescale_normed", default=False) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.rescale_grads", default=False) parser.add_argument("-moe.gumbel_decay", default=0) parser.add_argument("-moe.sinkhorn_local", default=False) parser.add_argument("-moe.sinkhron_n_iters", default=3) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.expert_size_init", default=False) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.invisible_selection", default=False) parser.add_argument("-moe.slope_multiplier", default=1.0) 
parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-kvmem.linproj", default=False) parser.add_argument("-kvmem.head_merge_topk", default=False) parser.add_argument("-kvmem.load_balance", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.randomize_indices", default=False) parser.add_argument("-kvmem.standard_parallel", default=False) parser.add_argument("-kvmem.query_bias", default=False) parser.add_argument("-kvmem.approx_topk", default=False) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-kvmem.factorize", default=False) parser.add_argument("-kvmem.full_key", default=False) parser.add_argument("-kvmem.key_redundancy_factor", default=1) parser.add_argument("-kvmem.two_stage", default=False) parser.add_argument("-kvmem.head_exclusive", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.universal.nonshared", default=0) parser.add_argument("-transformer.topk_use_norm", default=True) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-transformer.output_mode", default="normal", choice=["normal", "sum", "geometric", "sigmoid"]) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dim_feedforward=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier), dropout=self.helper.args.dropout, activation=activation ) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}:
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_keys", default="128", parser=parser.int_list_parser) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-pkm.knn", default=32) parser.add_argument("-pkm.stochastic", default=False) parser.add_argument("-pkm.query_batchnorm", default=False) parser.add_argument("-pkm.custom_init", default=0) parser.add_argument("-pkm.slice_values", default=False) parser.add_argument("-pkm.slice_proj", default=False) parser.add_argument("-pkm.sample_smallest", default=False) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="add", choice=["add", "gate", "sigmoid", "gumbel", "hard_gumbel", "predict", "predict_mlp", "classify", "gumbel_sigmoid", "sinkhorn", "sinkhorn2", "sinkmoid", "sinkmax", "moe", "mul", "random", "sinkmoid2", "sinkmax2", "modulate"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.key_mode", default="moe", choice=["moe", "both", "shared"]) parser.add_argument("-moe.half_key", default=False) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.kmeans_distance", default='cosine', choice=['cosine', 'euclidean']) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.std_correction", default=False) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.weight_grouping", default="none", choice=["none", "keys_only", "keys_and_experts"]) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.mlp_selection", default=False) parser.add_argument("-moe.block_expert_sel_in_grad", default=False) parser.add_argument("-moe.classification_target", default="sum", choice=["sum", "max"]) parser.add_argument("-moe.recluster_steps", default="", parser=parser.int_list_parser) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-moe.norm_standard_parallel_values", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.topological_sel_reg", default=0.0) parser.add_argument("-moe.topological_expert_reg", default=0.0) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) 
parser.add_argument("-moe.gumbel_select_only", default=False) parser.add_argument("-moe.topk_value_norm_compensation", default=False) parser.add_argument("-moe.norm_expert_scores", default=False) parser.add_argument("-moe.sel_input_cluster_init", default=False) parser.add_argument("-moe.init_norm_mode", default="full") parser.add_argument("-moe.bias", default=False) parser.add_argument("-moe.sel_bias", default=False) parser.add_argument("-moe.rescale_normed", default=False) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.rescale_grads", default=False) parser.add_argument("-moe.gumbel_decay", default=0) parser.add_argument("-moe.sinkhorn_local", default=False) parser.add_argument("-moe.sinkhron_n_iters", default=3) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.expert_size_init", default=False) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.invisible_selection", default=False) parser.add_argument("-moe.slope_multiplier", default=1.0) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-kvmem.linproj", default=False) parser.add_argument("-kvmem.head_merge_topk", default=False) parser.add_argument("-kvmem.load_balance", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.randomize_indices", default=False) parser.add_argument("-kvmem.standard_parallel", default=False) parser.add_argument("-kvmem.query_bias", default=False) parser.add_argument("-kvmem.approx_topk", default=False) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-kvmem.factorize", default=False) parser.add_argument("-kvmem.full_key", default=False) parser.add_argument("-kvmem.key_redundancy_factor", default=1) parser.add_argument("-kvmem.two_stage", default=False) parser.add_argument("-kvmem.head_exclusive", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.universal.nonshared", default=0) parser.add_argument("-transformer.topk_use_norm", default=True) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-transformer.output_mode", default="normal", choice=["normal", "sum", "geometric", "sigmoid"]) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif 
self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dim_feedforward=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier), dropout=self.helper.args.dropout, activation=activation ) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}:
mklayer = lambda: PrelnRelativeTransformerEncoderLayer(
4
2023-10-16 11:26:45+00:00
24k
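For orientation, the row above (ending with level "24k") reads as a repo-level next-line prediction example: cropped_code stops right before next_line, the context list carries candidate cross-file snippets, and gold_snippet_index appears to point at the entry defining the symbol used in next_line (here PrelnRelativeTransformerEncoderLayer). Below is a minimal sketch, assuming only the schema fields shown above, of how such a row might be assembled into a prompt and scored; the helper names (build_prompt, exact_match) and the toy values are hypothetical and not part of the dataset.

# A minimal sketch (not part of the dataset) of turning one row of this dump into a
# next-line completion prompt and scoring a prediction by exact match.
# Only the schema fields shown above are assumed (context, import_statement,
# cropped_code, next_line, gold_snippet_index); helper names and toy values are hypothetical.
from typing import Any, Dict, List


def build_prompt(row: Dict[str, Any], max_context_snippets: int = 3) -> str:
    """Concatenate a few cross-file snippets, the imports, and the cropped file body."""
    context: List[Dict[str, str]] = list(row["context"])
    gold = row.get("gold_snippet_index")
    if isinstance(gold, int) and 0 <= gold < len(context):
        # Put the gold snippet first so it survives truncation of the candidate list.
        context.insert(0, context.pop(gold))
    parts = [
        f"# {entry['path']} :: {entry['identifier']}\n{entry['snippet']}"
        for entry in context[:max_context_snippets]
    ]
    parts.append(row["import_statement"])
    parts.append(row["cropped_code"])
    return "\n\n".join(parts)


def exact_match(prediction: str, row: Dict[str, Any]) -> bool:
    """Whitespace-insensitive comparison against the reference next_line."""
    return prediction.strip() == row["next_line"].strip()


if __name__ == "__main__":
    # Toy row mirroring the structure of the example above; all values abbreviated or invented.
    row = {
        "repo_name": "example_org/example_repo",
        "context": [
            {"identifier": "PrelnRelativeTransformerEncoderLayer",
             "path": "layers/transformer/relative_preln_transformer.py",
             "snippet": "class PrelnRelativeTransformerEncoderLayer: ..."},
        ],
        "import_statement": "from layers.transformer import PrelnRelativeTransformerEncoderLayer",
        "cropped_code": "if self.helper.args.transformer.variant in {\"preln_relative\"}:",
        "next_line": "mklayer = lambda: PrelnRelativeTransformerEncoderLayer(",
        "gold_snippet_index": 0,
    }
    print(build_prompt(row))
    print(exact_match("mklayer = lambda: PrelnRelativeTransformerEncoderLayer(", row))

Pinning the gold snippet to the front of the context before truncation is only one possible choice; a retrieval score or the original snippet order could be used instead.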
Jacob-Zhou/gecdi
gec/parser.py
[ { "identifier": "Dataset", "path": "gec/data.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n r\"\"\"\n Dataset that is compatible with :class:`torch.utils.data.Dataset`, serving as a wrapper for manipulating all data fields\n with the operating behaviours defined in :class:`~supar.utils.transform.Transform`.\n The data fields of all the instantiated sentences can be accessed as an attribute of the dataset.\n\n Args:\n transform (Transform):\n An instance of :class:`~supar.utils.transform.Transform` or its derivations.\n The instance holds a series of loading and processing behaviours with regard to the specific data format.\n data (Union[str, Iterable]):\n A filename or a list of instances that will be passed into :meth:`transform.load`.\n cache (bool):\n If ``True``, tries to use the previously cached binarized data for fast loading.\n In this way, sentences are loaded on-the-fly according to the meta data.\n If ``False``, all sentences will be directly loaded into the memory.\n Default: ``False``.\n binarize (bool):\n If ``True``, binarizes the dataset once building it. Only works if ``cache=True``. Default: ``False``.\n bin (str):\n Path for saving binarized files, required if ``cache=True``. Default: ``None``.\n max_len (int):\n Sentences exceeding the length will be discarded. Default: ``None``.\n kwargs (Dict):\n Together with `data`, kwargs will be passed into :meth:`transform.load` to control the loading behaviour.\n\n Attributes:\n transform (Transform):\n An instance of :class:`~supar.utils.transform.Transform`.\n sentences (List[Sentence]):\n A list of sentences loaded from the data.\n Each sentence includes fields obeying the data format defined in ``transform``.\n If ``cache=True``, each is a pointer to the sentence stored in the cache file.\n \"\"\"\n\n def __init__(\n self,\n transform: Transform,\n data: Union[str, Iterable],\n cache: bool = False,\n binarize: bool = False,\n bin: str = None,\n max_len: int = None,\n **kwargs\n ) -> Dataset:\n super(Dataset, self).__init__()\n\n self.transform = transform\n self.data = data\n self.cache = cache\n self.binarize = binarize\n self.bin = bin\n self.max_len = max_len or INF\n self.kwargs = kwargs\n\n if cache:\n if not isinstance(data, str) or not os.path.exists(data):\n raise FileNotFoundError(\"Only files are allowed for binarization, but not found\")\n if self.bin is None:\n self.fbin = data + '.pt'\n else:\n os.makedirs(self.bin, exist_ok=True)\n self.fbin = os.path.join(self.bin, os.path.split(data)[1]) + '.pt'\n if not self.binarize and os.path.exists(self.fbin):\n try:\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n except Exception:\n raise RuntimeError(f\"Error found while debinarizing {self.fbin}, which may have been corrupted. 
\"\n \"Try re-binarizing it first\")\n else:\n self.sentences = list(transform.load(data, **kwargs))\n\n def __repr__(self):\n s = f\"{self.__class__.__name__}(\"\n s += f\"n_sentences={len(self.sentences)}\"\n if hasattr(self, 'loader'):\n s += f\", n_batches={len(self.loader)}\"\n if hasattr(self, 'buckets'):\n s += f\", n_buckets={len(self.buckets)}\"\n if self.shuffle:\n s += f\", seed={self.seed}\"\n if self.cache:\n s += f\", cache={self.cache}\"\n if self.binarize:\n s += f\", binarize={self.binarize}\"\n if self.max_len < INF:\n s += f\", max_len={self.max_len}\"\n s += \")\"\n return s\n\n def __len__(self):\n return len(self.sentences)\n\n def __getitem__(self, index):\n return debinarize(self.fbin, self.sentences[index]) if self.cache else self.sentences[index]\n\n def __getattr__(self, name):\n if name not in {f.name for f in self.transform.flattened_fields}:\n raise AttributeError\n if self.cache:\n if os.path.exists(self.fbin) and not self.binarize:\n sentences = self\n else:\n sentences = self.transform.load(self.data, **self.kwargs)\n return (getattr(sentence, name) for sentence in sentences)\n return [getattr(sentence, name) for sentence in self.sentences]\n\n def __getstate__(self):\n return self.__dict__\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\n @lazy_property\n def sizes(self):\n if not self.cache:\n return [s.size for s in self.sentences]\n return debinarize(self.fbin, 'sizes')\n\n def build(\n self,\n batch_size: int,\n n_buckets: int = 1,\n shuffle: bool = False,\n distributed: bool = False,\n n_workers: int = 0,\n pin_memory: bool = True,\n chunk_size: int = 1000,\n seed: int = 1,\n ) -> Dataset:\n # numericalize all fields\n if not self.cache:\n self.sentences = [i for i in self.transform(self.sentences) if len(i) < self.max_len]\n else:\n # if not forced to do binarization and the binarized file already exists, directly load the meta file\n if os.path.exists(self.fbin) and not self.binarize:\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n else:\n @contextmanager\n def cache(sentences):\n ftemp = tempfile.mkdtemp()\n fs = os.path.join(ftemp, 'sentences')\n fb = os.path.join(ftemp, os.path.basename(self.fbin))\n global global_transform\n global_transform = self.transform\n sentences = binarize({'sentences': progress_bar(sentences)}, fs)[1]['sentences']\n try:\n yield ((sentences[s:s+chunk_size], fs, f\"{fb}.{i}\", self.max_len)\n for i, s in enumerate(range(0, len(sentences), chunk_size)))\n finally:\n del global_transform\n shutil.rmtree(ftemp)\n\n def numericalize(sentences, fs, fb, max_len):\n sentences = global_transform((debinarize(fs, sentence) for sentence in sentences))\n sentences = [i for i in sentences if len(i) < max_len]\n return binarize({'sentences': sentences, 'sizes': [sentence.size for sentence in sentences]}, fb)[0]\n\n logger.info(f\"Seeking to cache the data to {self.fbin} first\")\n # numericalize the fields of each sentence\n if is_master():\n with cache(self.transform.load(self.data, **self.kwargs)) as chunks, mp.Pool(32) as pool:\n results = [pool.apply_async(numericalize, chunk) for chunk in chunks]\n self.sentences = binarize((r.get() for r in results), self.fbin, merge=True)[1]['sentences']\n if is_dist():\n dist.barrier()\n if not is_master():\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n # NOTE: the final bucket count is roughly equal to n_buckets\n self.buckets = dict(zip(*kmeans(self.sizes, n_buckets)))\n self.loader = DataLoader(transform=self.transform,\n 
dataset=self,\n batch_sampler=Sampler(self.buckets, batch_size, shuffle, distributed, seed=seed),\n num_workers=n_workers,\n collate_fn=collate_fn,\n pin_memory=pin_memory)\n self.seed = seed\n self.shuffle = shuffle\n return self" }, { "identifier": "map_token_ids", "path": "gec/fn.py", "snippet": "def map_token_ids(vocab_0, vocab_1, equal_labels=None):\n \"\"\"\n Map token ids from vocab_0 to vocab_1\n\n Args:\n vocab_0 (dict): vocab_0\n vocab_1 (dict): vocab_1\n equal_labels (dict): equal_labels\n \"\"\"\n if equal_labels is None:\n equal_labels = {}\n return [(i, vocab_1[equal_labels.get(k, k)]) for k, i in vocab_0.items()\n if k in vocab_1]" }, { "identifier": "PerplexityMetric", "path": "gec/metric.py", "snippet": "class PerplexityMetric(Metric):\n def __init__(self,\n loss: Optional[float] = None,\n preds: Optional[torch.Tensor] = None,\n golds: Optional[torch.Tensor] = None,\n mask: Optional[torch.BoolTensor] = None,\n reverse: bool = True,\n eps: float = 1e-12) -> PerplexityMetric:\n super().__init__(reverse=reverse, eps=eps)\n\n self.n_tokens = 0.\n\n self.tp = 0.0\n self.pred = 0.0\n self.gold = 0.0\n\n self.total_loss = 0.\n\n if loss is not None:\n self(loss, preds, golds, mask)\n\n def __repr__(self):\n s = f\"loss: {self.loss:.4f} PPL: {self.ppl:.4f}\"\n if self.tp > 0:\n s += f\" - TGT: P: {self.p:6.2%} R: {self.r:6.2%} F0.5: {self.f:6.2%}\"\n return s\n\n def __call__(self, loss: float, preds: Tuple[List, torch.Tensor],\n golds: Tuple[List, torch.Tensor],\n mask: torch.BoolTensor) -> PerplexityMetric:\n n_tokens = mask.sum().item()\n self.n += len(mask)\n self.count += 1\n self.n_tokens += n_tokens\n self.total_loss += float(loss) * n_tokens\n\n if preds is not None:\n with tempfile.TemporaryDirectory() as t:\n fsrc, fpred, fgold = os.path.join(t, 'src'), os.path.join(\n t, 'pred'), os.path.join(t, 'gold')\n pred_m2, gold_m2 = os.path.join(t, 'pred.m2'), os.path.join(\n t, 'gold.m2')\n with open(fsrc, 'w') as fs, open(fpred, 'w') as f:\n for s, i in preds:\n fs.write(s + '\\n')\n f.write(i + '\\n')\n with open(fgold, 'w') as f:\n for _, i in golds:\n f.write(i + '\\n')\n subprocess.check_output([\n 'errant_parallel', '-orig', f'{fsrc}', '-cor', f'{fpred}',\n '-out', f'{pred_m2}'\n ])\n subprocess.check_output([\n 'errant_parallel', '-orig', f'{fsrc}', '-cor', f'{fgold}',\n '-out', f'{gold_m2}'\n ])\n out = subprocess.check_output(\n [\n 'errant_compare', '-hyp', f'{pred_m2}', '-ref',\n f'{gold_m2}'\n ],\n stderr=subprocess.STDOUT).decode()\n tp, fp, fn = (int(i) for i in out.split('\\n')[3].split()[:3])\n self.tp += tp\n self.pred += tp + fp\n self.gold += tp + fn\n return self\n\n def __add__(self, other: PerplexityMetric) -> PerplexityMetric:\n metric = PerplexityMetric(eps=self.eps)\n metric.n = self.n + other.n\n metric.count = self.count + other.count\n metric.n_tokens = self.n_tokens + other.n_tokens\n metric.total_loss = self.total_loss + other.total_loss\n\n metric.tp = self.tp + other.tp\n metric.pred = self.pred + other.pred\n metric.gold = self.gold + other.gold\n metric.reverse = self.reverse or other.reverse\n return metric\n\n @property\n def score(self):\n return self.f if self.f > 0 else self.ppl\n\n @property\n def loss(self):\n return self.total_loss / self.n_tokens\n\n @property\n def ppl(self):\n return math.pow(2, (self.loss / math.log(2)))\n\n @property\n def p(self):\n return self.tp / (self.pred + self.eps)\n\n @property\n def r(self):\n return self.tp / (self.gold + self.eps)\n\n @property\n def f(self):\n return (1 + 0.5**2) * self.p * self.r / 
(0.5**2 * self.p + self.r +\n self.eps)" }, { "identifier": "SpanMetric", "path": "gec/metric.py", "snippet": "class SpanMetric(Metric):\n def __init__(self,\n loss: Optional[float] = None,\n preds: Optional[List[List[Tuple]]] = None,\n golds: Optional[List[List[Tuple]]] = None,\n reverse: bool = False,\n beta: Optional[float] = 1.,\n eps: float = 1e-12) -> SpanMetric:\n super().__init__(reverse=reverse, eps=eps)\n\n self.n_ucm = 0.0\n self.n_lcm = 0.0\n self.n_tr = 0.0\n self.n_fr = 0.0\n self.n_e = 0.0\n self.n_c = 0.0\n self.utp = 0.0\n self.ltp = 0.0\n self.pred = 0.0\n self.gold = 0.0\n self.beta = beta\n\n if loss is not None:\n self(loss, preds, golds)\n\n def __repr__(self):\n s = f\"ErrorSents: {self.n_e:6.0f} CorrectSents: {self.n_c:6.0f} TR: {self.tr:7.2%} FR: {self.fr:7.2%} \"\n # s += f\"GoldSpans: {self.gold:6.0f} PredSpans: {self.pred:6.0f} \"\n s += f\"UP: {self.up:7.2%} UR: {self.ur:7.2%} UF{'' if self.beta == 1.0 else self.beta}: {self.uf:7.2%} \"\n s += f\"LP: {self.lp:7.2%} LR: {self.lr:7.2%} LF{'' if self.beta == 1.0 else self.beta}: {self.lf:7.2%}\"\n return s\n\n def __call__(self, loss: float, preds: List[List[Tuple]],\n golds: List[List[Tuple]]) -> SpanMetric:\n self.n += len(preds)\n self.count += 1\n self.total_loss += float(loss)\n for pred, gold in zip(preds, golds):\n upred, ugold = Counter([tuple(span[:-1])\n for span in pred]), Counter(\n [tuple(span[:-1]) for span in gold])\n lpred, lgold = Counter([tuple(span) for span in pred\n ]), Counter([tuple(span) for span in gold])\n utp, ltp = list((upred & ugold).elements()), list(\n (lpred & lgold).elements())\n self.n_ucm += len(utp) == len(pred) == len(gold)\n self.n_lcm += len(ltp) == len(pred) == len(gold)\n self.n_tr += ((len(gold) > 0) and (len(pred) > 0))\n self.n_fr += ((len(gold) == 0) and (len(pred) > 0))\n self.n_e += (len(gold) > 0)\n self.n_c += (len(gold) == 0)\n self.utp += len(utp)\n self.ltp += len(ltp)\n self.pred += len(pred)\n self.gold += len(gold)\n return self\n\n def __add__(self, other: SpanMetric) -> SpanMetric:\n metric = SpanMetric(eps=self.eps, beta=self.beta)\n metric.n = self.n + other.n\n metric.count = self.count + other.count\n metric.total_loss = self.total_loss + other.total_loss\n metric.n_ucm = self.n_ucm + other.n_ucm\n metric.n_lcm = self.n_lcm + other.n_lcm\n metric.n_tr = self.n_tr + other.n_tr\n metric.n_fr = self.n_fr + other.n_fr\n metric.n_e = self.n_e + other.n_e\n metric.n_c = self.n_c + other.n_c\n metric.utp = self.utp + other.utp\n metric.ltp = self.ltp + other.ltp\n metric.pred = self.pred + other.pred\n metric.gold = self.gold + other.gold\n metric.reverse = self.reverse or other.reverse\n return metric\n\n @property\n def score(self):\n return self.lf\n\n @property\n def ucm(self):\n return self.n_ucm / (self.n + self.eps)\n\n @property\n def lcm(self):\n return self.n_lcm / (self.n + self.eps)\n\n @property\n def tr(self):\n return self.n_tr / (self.n_e + self.eps)\n\n @property\n def fr(self):\n return self.n_fr / (self.n_c + self.eps)\n\n @property\n def up(self):\n return self.utp / (self.pred + self.eps)\n\n @property\n def ur(self):\n return self.utp / (self.gold + self.eps)\n\n @property\n def uf(self):\n return (1 + self.beta**2) * self.utp / (self.pred +\n (self.beta**2) * self.gold +\n self.eps)\n\n @property\n def lp(self):\n return self.ltp / (self.pred + self.eps)\n\n @property\n def lr(self):\n return self.ltp / (self.gold + self.eps)\n\n @property\n def lf(self):\n return (1 + self.beta**2) * self.ltp / (self.pred +\n (self.beta**2) * self.gold 
+\n self.eps)" }, { "identifier": "Seq2SeqDetectModel", "path": "gec/model.py", "snippet": "class Seq2SeqDetectModel(Seq2SeqModel):\n r\"\"\"\n The implementation of Semantic Role Labeling Parser using span-constrained CRF.\n\n Args:\n n_words (int):\n The size of the word vocabulary.\n n_tags (int):\n The number of POS tags, required if POS tag embeddings are used. Default: ``None``.\n n_chars (int):\n The number of characters, required if character-level representations are used. Default: ``None``.\n n_lemmas (int):\n The number of lemmas, required if lemma embeddings are used. Default: ``None``.\n encoder (str):\n Encoder to use.\n ``'lstm'``: BiLSTM encoder.\n ``'bert'``: BERT-like pretrained language model (for finetuning), e.g., ``'bert-base-cased'``.\n Default: ``'lstm'``.\n n_embed (int):\n The size of word embeddings. Default: 100.\n n_pretrained (int):\n The size of pretrained word embeddings. Default: 125.\n n_feat_embed (int):\n The size of feature representations. Default: 100.\n n_char_embed (int):\n The size of character embeddings serving as inputs of CharLSTM, required if using CharLSTM. Default: 50.\n n_char_hidden (int):\n The size of y states of CharLSTM, required if using CharLSTM. Default: 100.\n char_pad_index (int):\n The index of the padding token in the character vocabulary, required if using CharLSTM. Default: 0.\n elmo (str):\n Name of the pretrained ELMo registered in `ELMoEmbedding.OPTION`. Default: ``'original_5b'``.\n elmo_bos_eos (tuple[bool]):\n A tuple of two boolean values indicating whether to keep start/end boundaries of elmo outputs.\n Default: ``(True, False)``.\n bert (str):\n Specifies which kind of language model to use, e.g., ``'bert-base-cased'``.\n This is required if ``encoder='bert'`` or using BERT features. The full list can be found in `transformers`_.\n Default: ``None``.\n n_bert_layers (int):\n Specifies how many last layers to use, required if ``encoder='bert'`` or using BERT features.\n The final outputs would be weighted sum of the y states of these layers.\n Default: 4.\n mix_dropout (float):\n The dropout ratio of BERT layers, required if ``encoder='bert'`` or using BERT features. Default: .0.\n bert_pooling (str):\n Pooling way to get token embeddings.\n ``first``: take the first subtoken. ``last``: take the last subtoken. ``mean``: take a mean over all.\n Default: ``mean``.\n bert_pad_index (int):\n The index of the padding token in BERT vocabulary, required if ``encoder='bert'`` or using BERT features.\n Default: 0.\n freeze (bool):\n If ``True``, freezes BERT parameters, required if using BERT features. Default: ``True``.\n embed_dropout (float):\n The dropout ratio of input embeddings. Default: .2.\n n_encoder_hidden (int):\n The size of LSTM y states. Default: 600.\n n_encoder_layers (int):\n The number of LSTM layers. Default: 3.\n encoder_dropout (float):\n The dropout ratio of encoder layer. Default: .33.\n mlp_dropout (float):\n The dropout ratio of unary edge factor MLP layers. Default: .33.\n pad_index (int):\n The index of the padding token in the word vocabulary. Default: 0.\n unk_index (int):\n The index of the unknown token in the word vocabulary. Default: 1.\n\n .. 
_transformers:\n https://github.com/huggingface/transformers\n \"\"\"\n\n def __init__(self,\n n_words,\n n_labels,\n n_tags=None,\n n_chars=None,\n n_lemmas=None,\n encoder='lstm',\n n_embed=100,\n n_pretrained=100,\n n_feat_embed=100,\n n_char_embed=50,\n n_char_hidden=100,\n char_pad_index=0,\n char_dropout=0,\n elmo='original_5b',\n elmo_bos_eos=(True, False),\n bert=None,\n n_bert_layers=4,\n mix_dropout=.0,\n bert_pooling='mean',\n bert_pad_index=0,\n freeze=True,\n embed_dropout=.33,\n n_encoder_hidden=1024,\n n_encoder_layers=3,\n encoder_dropout=.1,\n pad_index=0,\n unk_index=1,\n **kwargs):\n super().__init__(**Config().update(locals()))\n\n del self.classifier\n self.error_classifier = nn.Linear(self.model.config.d_model,\n self.args.n_labels)\n self.criterion = CrossEntropyLoss(\n label_smoothing=self.args.label_smoothing)\n\n def loss(self, x, tgt, src_error, tgt_error, src_mask, tgt_mask):\n src_mask = src_mask & True\n tgt_mask = tgt_mask & True\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n\n n_shift = 1 if self.args.encoder == 'transformer' else 2\n y, tgt_mask = y[:, n_shift:], tgt_mask[:, n_shift:]\n\n y = self.decoder_dropout(y)\n # s_src_error = self.error_classifier(x[:, 1:-1])\n s_tgt_error = self.error_classifier(y)\n\n # src_mask = src_mask[:, 2:]\n\n if \"partial\" in self.args.error_schema:\n # src_mask = src_mask & (src_error != self.args.nul_index)\n tgt_mask = tgt_mask & (tgt_error != self.args.nul_index)\n # src_error_loss = self.criterion(s_src_error[src_mask], src_error[src_mask])\n tgt_error_loss = self.criterion(s_tgt_error[tgt_mask],\n tgt_error[tgt_mask])\n # return src_error_loss + tgt_error_loss\n return tgt_error_loss\n\n def decode(self, x, tgt, src_mask, tgt_mask):\n src_mask = src_mask & True\n tgt_mask = tgt_mask & True\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n\n n_shift = 1 if self.args.encoder == 'transformer' else 2\n y, mask = y[:, n_shift:], tgt_mask[:, n_shift:]\n\n s_errors = self.error_classifier(y)\n if \"partial\" in self.args.error_schema:\n s_errors[...,\n self.args.nul_index] = torch.finfo(s_errors.dtype).min\n errors = s_errors.argmax(-1)\n errors[~mask] = -1\n\n return errors" }, { "identifier": "Seq2SeqModel", "path": "gec/model.py", "snippet": "class Seq2SeqModel(Model):\n r\"\"\"\n The implementation of Semantic Role Labeling Parser using span-constrained CRF.\n\n Args:\n n_words (int):\n The size of the word vocabulary.\n n_tags (int):\n The number of POS tags, required if POS tag embeddings are used. 
Default: ``None``.\n n_chars (int):\n The number of characters, required if character-level representations are used. Default: ``None``.\n n_lemmas (int):\n The number of lemmas, required if lemma embeddings are used. Default: ``None``.\n encoder (str):\n Encoder to use.\n ``'lstm'``: BiLSTM encoder.\n ``'bert'``: BERT-like pretrained language model (for finetuning), e.g., ``'bert-base-cased'``.\n Default: ``'lstm'``.\n n_embed (int):\n The size of word embeddings. Default: 100.\n n_pretrained (int):\n The size of pretrained word embeddings. Default: 125.\n n_feat_embed (int):\n The size of feature representations. Default: 100.\n n_char_embed (int):\n The size of character embeddings serving as inputs of CharLSTM, required if using CharLSTM. Default: 50.\n n_char_hidden (int):\n The size of y states of CharLSTM, required if using CharLSTM. Default: 100.\n char_pad_index (int):\n The index of the padding token in the character vocabulary, required if using CharLSTM. Default: 0.\n elmo (str):\n Name of the pretrained ELMo registered in `ELMoEmbedding.OPTION`. Default: ``'original_5b'``.\n elmo_bos_eos (tuple[bool]):\n A tuple of two boolean values indicating whether to keep start/end boundaries of elmo outputs.\n Default: ``(True, False)``.\n bert (str):\n Specifies which kind of language model to use, e.g., ``'bert-base-cased'``.\n This is required if ``encoder='bert'`` or using BERT features. The full list can be found in `transformers`_.\n Default: ``None``.\n n_bert_layers (int):\n Specifies how many last layers to use, required if ``encoder='bert'`` or using BERT features.\n The final outputs would be weighted sum of the y states of these layers.\n Default: 4.\n mix_dropout (float):\n The dropout ratio of BERT layers, required if ``encoder='bert'`` or using BERT features. Default: .0.\n bert_pooling (str):\n Pooling way to get token embeddings.\n ``first``: take the first subtoken. ``last``: take the last subtoken. ``mean``: take a mean over all.\n Default: ``mean``.\n bert_pad_index (int):\n The index of the padding token in BERT vocabulary, required if ``encoder='bert'`` or using BERT features.\n Default: 0.\n freeze (bool):\n If ``True``, freezes BERT parameters, required if using BERT features. Default: ``True``.\n embed_dropout (float):\n The dropout ratio of input embeddings. Default: .2.\n n_encoder_hidden (int):\n The size of LSTM y states. Default: 600.\n n_encoder_layers (int):\n The number of LSTM layers. Default: 3.\n encoder_dropout (float):\n The dropout ratio of encoder layer. Default: .33.\n mlp_dropout (float):\n The dropout ratio of unary edge factor MLP layers. Default: .33.\n pad_index (int):\n The index of the padding token in the word vocabulary. Default: 0.\n unk_index (int):\n The index of the unknown token in the word vocabulary. Default: 1.\n\n .. 
_transformers:\n https://github.com/huggingface/transformers\n \"\"\"\n\n def __init__(self,\n n_words,\n n_tags=None,\n n_chars=None,\n n_lemmas=None,\n encoder='lstm',\n n_embed=100,\n n_pretrained=100,\n n_feat_embed=100,\n n_char_embed=50,\n n_char_hidden=100,\n char_pad_index=0,\n char_dropout=0,\n elmo='original_5b',\n elmo_bos_eos=(True, False),\n bert=None,\n n_bert_layers=4,\n mix_dropout=.0,\n bert_pooling='mean',\n bert_pad_index=0,\n freeze=True,\n embed_dropout=.33,\n n_encoder_hidden=512,\n n_encoder_layers=3,\n encoder_dropout=.1,\n pad_index=0,\n unk_index=1,\n **kwargs):\n super().__init__(**Config().update(locals()))\n\n if self.args.encoder == 'transformer':\n self.token_dropout = TokenDropout(self.args.token_dropout)\n self.decoder = TransformerDecoder(\n layer=TransformerDecoderLayer(\n n_heads=self.args.n_decoder_heads,\n n_model=self.args.n_decoder_hidden,\n n_inner=self.args.n_decoder_inner,\n dropout=self.args.decoder_dropout),\n n_layers=self.args.n_decoder_layers)\n\n else:\n from transformers import AutoModel\n self.model = AutoModel.from_pretrained(self.args.bart,\n dropout=self.args.dropout)\n self.encoder, self.decoder = self.model.encoder, self.model.decoder\n self.decoder_dropout = nn.Dropout(self.args.decoder_dropout)\n self.classifier = nn.Linear(self.args.n_encoder_hidden,\n self.args.n_words)\n self.classifier.weight = (self.word_embed.embed\n if self.args.encoder == 'transformer' else\n self.model.shared).weight\n self.criterion = CrossEntropyLoss(\n label_smoothing=self.args.label_smoothing)\n\n def forward(self, words):\n r\"\"\"\n Args:\n words (~torch.LongTensor): ``[batch_size, seq_len]``.\n Word indices.\n\n Returns:\n ~torch.Tensor:\n Representations for the src sentences of the shape ``[batch_size, seq_len, n_model]``.\n \"\"\"\n # we need to do token dropout, so the TranformerWordEmbedding layer is not invoked here\n if self.args.encoder == 'transformer':\n embed = self.token_dropout(self.word_embed.embed(words))\n embed = embed * self.word_embed.embed_scale + self.word_embed.pos_embed(\n embed)\n embed = self.embed_dropout(embed)\n return self.encoder(embed, words.ne(self.args.pad_index))\n else:\n return self.encoder(input_ids=words,\n attention_mask=words.ne(\n self.args.pad_index))[0]\n\n def loss(self, x, tgt, src_mask, tgt_mask):\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n y = self.decoder_dropout(y)\n s_y = self.classifier(y)\n return self.criterion(s_y[tgt_mask], tgt[tgt_mask])\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (tuple(\n past_state.index_select(0, beam_idx)\n for past_state in layer_past), )\n return reordered_past\n\n def decode(self, x, src_mask):\n batch_size, *_ = x.shape\n beam_size, n_words = self.args.beam_size, self.args.n_words\n\n # repeat the src inputs beam_size times\n # [batch_size * beam_size, ...]\n x = x.unsqueeze(1).repeat(1, 
beam_size, 1, 1).view(-1, *x.shape[1:])\n src_mask = src_mask.unsqueeze(1).repeat(1, beam_size, 1).view(\n -1, *src_mask.shape[1:])\n # initialize the tgt inputs by <bos>\n # [batch_size * beam_size, seq_len]\n tgt = x.new_full((batch_size * beam_size, 1),\n self.args.bos_index,\n dtype=torch.long)\n # [batch_size * beam_size]\n active = src_mask.new_ones(batch_size * beam_size)\n # [batch_size]\n batches = tgt.new_tensor(range(batch_size)) * beam_size\n # accumulated scores\n scores = x.new_full((batch_size, self.args.beam_size),\n MIN).index_fill_(-1, tgt.new_tensor(0), 0).view(-1)\n\n def rank(scores, mask, k):\n scores = scores / mask.sum(-1).unsqueeze(\n -1)**self.args.length_penalty\n return scores.view(batch_size, -1).topk(k, -1)[1]\n\n if self.args.encoder != 'transformer':\n past_key_values = self.decoder(\n input_ids=torch.full_like(tgt[:, :1], self.args.eos_index),\n attention_mask=torch.ones_like(src_mask[:, :1]),\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask,\n past_key_values=None,\n use_cache=True)[1]\n\n for t in range(1, min(self.args.max_len + 1, int(1.8 * x.shape[1]))):\n tgt_mask = tgt.ne(self.args.pad_index)\n if self.args.encoder == 'transformer':\n attn_mask = tgt_mask.new_ones(t, t).tril_()\n s_y = self.decoder(self.embed(tgt[active]), x[active],\n tgt_mask[active], src_mask[active],\n attn_mask)\n # [n_active, n_words]\n s_y = self.classifier(s_y[:, -1]).log_softmax(-1)\n # only allow finished sequences to get <pad>\n # [batch_size * beam_size, n_words]\n s_y = x.new_full((batch_size * beam_size, n_words),\n MIN).masked_scatter_(active.unsqueeze(-1),\n s_y)\n else:\n input_ids = tgt[:, -1:]\n s_y, new_past_key_values = self.decoder(\n input_ids=input_ids,\n attention_mask=torch.cat(\n (torch.ones_like(tgt_mask[:, :1]), tgt_mask), 1),\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask,\n past_key_values=past_key_values,\n use_cache=True)[:2]\n del past_key_values\n past_key_values = new_past_key_values\n # [n_active, n_words]\n s_y = self.classifier(s_y[:, -1]).log_softmax(-1)\n # only allow finished sequences to get <pad>\n s_y[~active] = MIN\n\n s_y[~active, self.args.pad_index] = 0\n\n # [batch_size * beam_size, n_words]\n scores = scores.unsqueeze(-1) + s_y\n # [batch_size, beam_size]\n cands = rank(scores, tgt_mask, beam_size)\n # [batch_size * beam_size]\n scores = scores.view(batch_size, -1).gather(-1, cands).view(-1)\n # beams, tokens = cands // n_words, cands % n_words\n beams, tokens = cands.div(\n n_words, rounding_mode='floor'), (cands % n_words).view(-1, 1)\n indices = (batches.unsqueeze(-1) + beams).view(-1)\n # [batch_size * beam_size, seq_len + 1]\n tgt = torch.cat((tgt[indices], tokens), 1)\n past_key_values = self._reorder_cache(past_key_values, indices)\n active = tokens.ne(\n tokens.new_tensor(\n (self.args.eos_index, self.args.pad_index))).all(-1)\n\n if not active.any():\n break\n cands = rank(scores.view(-1, 1), tgt.ne(self.args.pad_index),\n self.args.topk)\n return tgt[(batches.unsqueeze(-1) + cands).view(-1)].view(\n batch_size, self.args.topk, -1)" }, { "identifier": "Field", "path": "gec/transform.py", "snippet": "class Field(supar.utils.Field):\n r\"\"\"\n Defines a datatype together with instructions for converting to :class:`~torch.Tensor`.\n :class:`Field` models common text processing datatypes that can be represented by tensors.\n It holds a :class:`~supar.utils.vocab.Vocab` object that defines the set of possible values\n for elements of the field and their corresponding numerical representations.\n The 
:class:`Field` object also holds other parameters relating to how a datatype\n should be numericalized, such as a tokenization method.\n\n Args:\n name (str):\n The name of the field.\n pad_token (str):\n The string token used as padding. Default: ``None``.\n unk_token (str):\n The string token used to represent OOV words. Default: ``None``.\n bos_token (str):\n A token that will be prepended to every example using this field, or ``None`` for no `bos_token`.\n Default: ``None``.\n eos_token (str):\n A token that will be appended to every example using this field, or ``None`` for no `eos_token`.\n lower (bool):\n Whether to lowercase the text in this field. Default: ``False``.\n use_vocab (bool):\n Whether to use a :class:`~supar.utils.vocab.Vocab` object.\n If ``False``, the data in this field should already be numerical.\n Default: ``True``.\n tokenize (function):\n The function used to tokenize strings using this field into sequential examples. Default: ``None``.\n fn (function):\n The function used for preprocessing the examples. Default: ``None``.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.padding_side = kwargs.pop('padding_side') if 'padding_side' in kwargs else 'right'\n super().__init__(*args, **kwargs)\n\n def compose(self, batch: Iterable[torch.Tensor]) -> torch.Tensor:\n r\"\"\"\n Composes a batch of sequences into a padded tensor.\n\n Args:\n batch (Iterable[~torch.Tensor]):\n A list of tensors.\n\n Returns:\n A padded tensor converted to proper device.\n \"\"\"\n\n return pad(batch, self.pad_index, padding_side=self.padding_side).to(self.device, non_blocking=True)" }, { "identifier": "Text", "path": "gec/transform.py", "snippet": "class Text(Transform):\n\n fields = ['SRC', 'TGT']\n\n def __init__(\n self,\n SRC: Optional[Union[Field, Iterable[Field]]] = None,\n TGT: Optional[Union[Field, Iterable[Field]]] = None\n ) -> Text:\n super().__init__()\n\n self.SRC = SRC\n self.TGT = TGT\n\n @property\n def src(self):\n return self.SRC,\n\n @property\n def tgt(self):\n return self.TGT,\n\n def load(\n self,\n data: Union[str, Iterable],\n lang: Optional[str] = None,\n **kwargs\n ) -> Iterable[TextSentence]:\n r\"\"\"\n Loads the data in Text-X format.\n Also supports for loading data from Text-U file with comments and non-integer IDs.\n\n Args:\n data (str or Iterable):\n A filename or a list of instances.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n\n Returns:\n A list of :class:`TextSentence` instances.\n \"\"\"\n\n if lang is not None:\n tokenizer = Tokenizer(lang)\n if isinstance(data, str) and os.path.exists(data):\n f = open(data)\n if data.endswith('.txt'):\n lines = (i\n for s in f\n if len(s) > 1\n for i in StringIO((s.split() if lang is None else tokenizer(s)) + '\\n'))\n else:\n lines = f\n else:\n if lang is not None:\n data = [tokenizer(s) for s in ([data] if isinstance(data, str) else data)]\n else:\n data = [data] if isinstance(data[0], str) else data\n lines = (i for s in data for i in StringIO(s + '\\n'))\n\n index, sentence = 0, []\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n sentence = TextSentence(self, sentence, index)\n yield sentence\n index += 1\n sentence = []\n else:\n sentence.append(line)" }, { "identifier": "Tree", "path": "gec/transform.py", "snippet": "class Tree(Transform):\n\n fields = ['SRC', 'TGT', 'SRCERROR', 'TGTERROR']\n\n def __init__(\n self,\n SRC: Optional[Union[Field, 
Iterable[Field]]] = None,\n TGT: Optional[Union[Field, Iterable[Field]]] = None,\n SRCERROR: Optional[Union[Field, Iterable[Field]]] = None,\n TGTERROR: Optional[Union[Field, Iterable[Field]]] = None,\n **kwargs\n ) -> Tree:\n super().__init__()\n self.error_schema = kwargs.pop('error_schema') if 'error_schema' in kwargs else 'last'\n self.fine_error_type = kwargs.pop('fine_error_type') if 'fine_error_type' in kwargs else False\n\n self.SRC = SRC\n self.TGT = TGT\n self.SRCERROR = SRCERROR\n self.TGTERROR = TGTERROR\n\n @property\n def src(self):\n return self.SRC, self.TGT\n\n @property\n def tgt(self):\n return self.SRCERROR, self.TGTERROR\n\n def load(\n self,\n data: Union[str, Iterable],\n lang: Optional[str] = None,\n **kwargs\n ) -> Iterable[TextSentence]:\n r\"\"\"\n Loads the data in Text-X format.\n Also supports for loading data from Text-U file with comments and non-integer IDs.\n\n Args:\n data (Union[str, Iterable]):\n A filename or a list of instances.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n\n Returns:\n A list of :class:`TextSentence` instances.\n \"\"\"\n\n if lang is not None:\n tokenizer = Tokenizer(lang)\n if isinstance(data, str) and os.path.exists(data):\n f = open(data)\n if data.endswith('.txt'):\n lines = (i\n for s in f\n if len(s) > 1\n for i in StringIO((s.split() if lang is None else tokenizer(s)) + '\\n'))\n else:\n lines = f\n else:\n if lang is not None:\n data = [tokenizer(s) for s in ([data] if isinstance(data, str) else data)]\n else:\n data = [data] if isinstance(data[0], str) else data\n lines = (i for s in data for i in StringIO(s + '\\n'))\n\n def consume(lines, chunksize=10000):\n index, sentence, chunk = 0, [], []\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n chunk.append((sentence, index))\n if len(chunk) == chunksize:\n yield chunk\n chunk = []\n index += 1\n sentence = []\n else:\n sentence.append(line)\n if len(chunk) > 0:\n yield chunk\n\n @contextmanager\n def cache(lines):\n global global_transform\n global_transform = self\n ftemp = tempfile.mkdtemp()\n fbin = os.path.join(ftemp, 'data')\n try:\n yield ((chunk, f\"{fbin}.{i}\") for i, chunk in enumerate(consume(lines))), fbin\n finally:\n if dist.is_initialized() and not is_master():\n dist.barrier()\n del global_transform\n shutil.rmtree(ftemp)\n\n with cache(lines) as (chunks, fbin):\n if is_master():\n def process(chunk, fb):\n sentences = [TreeSentence(global_transform, *s) for s in progress_bar(chunk)]\n sentences = [s for s in sentences if s.vaild]\n return binarize({'sentences': sentences}, fb)[0]\n with mp.Pool(32) as pool:\n results = [pool.apply_async(process, (chunk, fb)) for chunk, fb in chunks]\n binarize((r.get() for r in results), fbin, merge=True)\n if dist.is_initialized() and not is_master():\n fbin = gather(fbin)[0]\n dist.barrier()\n for s in debinarize(fbin, meta=True)['sentences']:\n yield debinarize(fbin, s)" } ]
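For reference, a tiny worked example of the `map_token_ids` helper quoted above from `gec/fn.py`, which is also the helper invoked by this record's next_line further down. The toy vocabularies are invented, but the behaviour follows the snippet: only keys present in both vocabularies are kept, each paired as (id in vocab_0, id in vocab_1).

    def map_token_ids(vocab_0, vocab_1, equal_labels=None):
        # Same body as the gec/fn.py snippet above, restated so the example is self-contained.
        if equal_labels is None:
            equal_labels = {}
        return [(i, vocab_1[equal_labels.get(k, k)]) for k, i in vocab_0.items()
                if k in vocab_1]

    vocab_0 = {"<pad>": 0, "the": 1, "cat": 2}
    vocab_1 = {"<pad>": 3, "the": 7, "dog": 9}

    # "cat" is absent from vocab_1, so it is dropped.
    assert map_token_ids(vocab_0, vocab_1) == [(0, 3), (1, 7)]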
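Two small identities underpin the metric snippets above, spelled out here with invented numbers: `PerplexityMetric.ppl` computes 2 ** (loss / ln 2), which is simply exp(loss) for a mean cross-entropy measured in nats, and both metrics report an F_beta with beta = 0.5, which weights precision more heavily than recall.

    import math

    loss = 1.5                                  # hypothetical mean token cross-entropy (nats)
    ppl = math.pow(2, loss / math.log(2))       # as in PerplexityMetric.ppl
    assert abs(ppl - math.exp(loss)) < 1e-9     # 2 ** (x / ln 2) == e ** x

    # F0.5 as used by PerplexityMetric.f and SpanMetric.uf/lf (eps omitted):
    tp, pred, gold = 30, 50, 60                 # hypothetical span counts
    p, r = tp / pred, tp / gold
    f05 = (1 + 0.5 ** 2) * p * r / (0.5 ** 2 * p + r)
    assert abs(f05 - (1 + 0.5 ** 2) * tp / (pred + 0.5 ** 2 * gold)) < 1e-9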
import os import shutil import tempfile import math import dill import torch import torch.distributed as dist from datetime import datetime, timedelta from typing import Iterable, Union from gec.data import Dataset from gec.fn import map_token_ids from supar.parser import Parser from supar.utils import Config from supar.utils.common import MIN, NUL, UNK from supar.utils.field import RawField from supar.utils.fn import set_rng_state from supar.utils.logging import get_logger, init_logger, progress_bar from supar.utils.metric import Metric from supar.utils.optim import PolynomialLR from supar.utils.parallel import DistributedDataParallel as DDP, gather, is_dist from supar.utils.parallel import is_master from supar.utils.tokenizer import TransformerTokenizer from supar.utils.transform import AttachJuxtaposeTree, Batch from torch.cuda.amp import GradScaler from torch.optim import AdamW from torch.optim.lr_scheduler import ExponentialLR from torch.nn.functional import embedding from .metric import PerplexityMetric, SpanMetric from .model import Seq2SeqDetectModel, Seq2SeqModel from .transform import Field, Text, Tree from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook from transformers import AutoTokenizer, GPT2LMHeadModel
14498
] for tgt_tree, tgt_chart in zip(tgt_trees, tgt_charts)] return batch @classmethod def build(cls, path, min_freq=2, fix_len=20, **kwargs): r""" Build a brand-new Parser, including initialization of all data fields and model parameters. Args: path (str): The path of the model to be saved. min_freq (str): The minimum frequency needed to include a token in the vocabulary. Default: 2. fix_len (int): The max length of all subword pieces. The excess part of each piece will be truncated. Required if using CharLSTM/BERT. Default: 20. kwargs (dict): A dict holding the unconsumed arguments. """ args = Config(**locals()) os.makedirs(os.path.dirname(path) or './', exist_ok=True) if os.path.exists(path) and not args.build: return cls.load(**args) state = torch.load(args.checkpoint_path, map_location='cpu') args = state['args'].update(args) if args.bin_path is None: bin = os.path.join(os.path.dirname(args.path), 'bin') else: bin = args.bin_path fbin = os.path.join(bin, 'transform') + '.pt' if args.cache and os.path.exists(fbin): transform = torch.load(fbin, map_location='cpu') else: transform = state['transform'] t = transform.SRC.tokenize SRC = Field('src', pad=t.pad, unk=t.unk, bos=t.bos, eos=t.eos, tokenize=t) TGT = Field('tgt', pad=t.pad, unk=t.unk, bos=t.bos, eos=t.eos, tokenize=t) SRC_ERROR_RAW = RawField('src_error_raw') SRC_ERROR = Field('src_error') TGT_ERROR_RAW = RawField('tgt_error_raw') TGT_ERROR = Field('tgt_error') transform = Tree(SRC=SRC, TGT=TGT, SRCERROR=(SRC_ERROR_RAW, SRC_ERROR), TGTERROR=(TGT_ERROR_RAW, TGT_ERROR), error_schema=args.error_schema) train = Dataset(transform, args.train, **args) # share the vocab SRC.vocab = TGT.vocab = t.vocab SRC_ERROR = SRC_ERROR.build(train) TGT_ERROR = TGT_ERROR.build(train) SRC_ERROR.vocab = TGT_ERROR.vocab.update(SRC_ERROR.vocab) logger.info(f"{transform}") if args.cache: os.makedirs(bin, exist_ok=True) torch.save(transform, fbin, pickle_module=dill) SRC = transform.SRC (_, TGT_ERROR) = transform.TGTERROR args.update({ 'n_words': len(SRC.vocab), 'n_labels': len(TGT_ERROR.vocab), 'pad_index': SRC.pad_index, 'unk_index': SRC.unk_index, 'bos_index': SRC.bos_index, 'eos_index': SRC.eos_index, 'correct_index': TGT_ERROR.vocab['CORRECT'], }) if "partial" in args.error_schema: args.update({ 'nul_index': TGT_ERROR.vocab[NUL], }) logger.info("Building the model") model = cls.MODEL(**args) if args.gec_init: logger.info("Init the model with gec params") model.load_pretrained(state['pretrained']) model.load_state_dict(state['state_dict'], False) else: logger.info("Original Bart params") logger.info(f"{model}\n") parser = cls(args, model, transform) parser.model.to(parser.device) return parser class Seq2seqIntervenedParser(Parser): def __init__(self, args, gec_model, transform): self.gec_model = gec_model self.args = gec_model.args.update(args) self.transform = transform if self.args.lm_alpha > 0: self.lm_model = GPT2LMHeadModel.from_pretrained( self.args.lm_path).to(self.device) self.lm_model.eval() gpt2_tokenizer = AutoTokenizer.from_pretrained(self.args.lm_path) if self.args.lm_path == "IDEA-CCNL/Wenzhong2.0-GPT2-110M-BertTokenizer-chinese":
# -*- coding: utf-8 -*- logger = get_logger(__name__) class Seq2SeqParser(Parser): NAME = 'seq2seq' MODEL = Seq2SeqModel def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.SRC = self.transform.SRC self.TGT = self.transform.TGT def train(self, train: Union[str, Iterable], dev: Union[str, Iterable], test: Union[str, Iterable], epochs: int, patience: int, batch_size: int = 5000, update_steps: int = 1, buckets: int = 32, workers: int = 0, clip: float = 5.0, amp: bool = False, cache: bool = False, verbose: bool = True, **kwargs) -> None: args = self.args.update(locals()) init_logger(logger, verbose=args.verbose) self.transform.train() batch_size = batch_size // update_steps if dist.is_initialized(): batch_size = batch_size // dist.get_world_size() logger.info("Loading the data") if args.cache: args.bin = os.path.join(os.path.dirname(args.path), 'bin') train = Dataset(self.transform, args.train, **args).build(batch_size, buckets, True, dist.is_initialized(), workers, chunk_size=args.chunk_size, seed=args.seed) dev = Dataset(self.transform, args.dev, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'train:':6} {train}") if not args.test: logger.info(f"{'dev:':6} {dev}\n") else: test = Dataset(self.transform, args.test, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'dev:':6} {dev}") logger.info(f"{'test:':6} {test}\n") self.optimizer = AdamW(self.model.parameters(), args.lr, (args.mu, args.nu), args.eps, args.weight_decay) steps = len(train.loader) * epochs // args.update_steps self.scheduler = PolynomialLR(self.optimizer, warmup_steps=self.args.warmup_steps, steps=steps) self.scaler = GradScaler(enabled=args.amp) if dist.is_initialized(): self.model = DDP(self.model, device_ids=[args.local_rank], find_unused_parameters=args.get( 'find_unused_parameters', True)) if args.amp: self.model.register_comm_hook(dist.group.WORLD, fp16_compress_hook) self.step, self.epoch, self.best_e, self.patience, self.n_batches = 1, 1, 1, patience, len( train.loader) self.best_metric, self.elapsed = Metric(), timedelta() if self.args.checkpoint: try: self.optimizer.load_state_dict( self.checkpoint_state_dict.pop('optimizer_state_dict')) self.scheduler.load_state_dict( self.checkpoint_state_dict.pop('scheduler_state_dict')) self.scaler.load_state_dict( self.checkpoint_state_dict.pop('scaler_state_dict')) set_rng_state(self.checkpoint_state_dict.pop('rng_state')) for k, v in self.checkpoint_state_dict.items(): setattr(self, k, v) train.loader.batch_sampler.epoch = self.epoch except AttributeError: logger.warning( "No checkpoint found. 
Try re-launching the traing procedure instead" ) for epoch in range(self.epoch, args.epochs + 1): start = datetime.now() bar, metric = progress_bar(train.loader), Metric() logger.info(f"Epoch {epoch} / {args.epochs}:") self.model.train() if self.epoch == 1: torch.cuda.empty_cache() with self.join(): # we should zero `step` as the number of batches in different processes is not necessarily equal self.step = 0 for batch in bar: with self.sync(): with torch.autocast(self.device, enabled=self.args.amp): loss = self.train_step(batch) self.backward(loss) if self.sync_grad: self.clip_grad_norm_(self.model.parameters(), self.args.clip) self.scaler.step(self.optimizer) self.scaler.update() self.scheduler.step() self.optimizer.zero_grad(True) bar.set_postfix_str( f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f}" ) self.step += 1 logger.info(f"{bar.postfix}") self.model.eval() with self.join(), torch.autocast(self.device, enabled=self.args.amp): metric = self.reduce( sum([self.eval_step(i) for i in progress_bar(dev.loader)], Metric())) logger.info(f"{'dev:':5} {metric}") if args.test: test_metric = sum( [self.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {self.reduce(test_metric)}") t = datetime.now() - start self.epoch += 1 self.patience -= 1 self.elapsed += t if metric > self.best_metric: self.best_e, self.patience, self.best_metric = epoch, patience, metric if is_master(): self.save_checkpoint(args.path) logger.info(f"{t}s elapsed (saved)\n") else: logger.info(f"{t}s elapsed\n") if self.patience < 1: break if dist.is_initialized(): dist.barrier() best = self.load(**args) # only allow the master device to save models if is_master(): best.save(args.path) logger.info(f"Epoch {self.best_e} saved") logger.info(f"{'dev:':5} {self.best_metric}") if args.test: best.model.eval() with best.join(): test_metric = sum( [best.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {best.reduce(test_metric)}") logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch") def evaluate(self, data: Union[str, Iterable], batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, punct: bool = False, tree: bool = True, proj: bool = False, partial: bool = False, verbose: bool = True, **kwargs): return super().evaluate(**Config().update(locals())) def predict(self, data: Union[str, Iterable], pred: str = None, lang: str = None, prob: bool = False, batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, tree: bool = True, proj: bool = False, verbose: bool = True, **kwargs): return super().predict(**Config().update(locals())) def train_step(self, batch: Batch) -> torch.Tensor: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) return loss @torch.no_grad() def eval_step(self, batch: Batch) -> PerplexityMetric: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) preds = golds = None if self.args.eval_tgt: golds = [(s.values[0], s.values[1]) for s in batch.sentences] preds = [(s.values[0], self.TGT.tokenize.decode(i[0])) for s, i in zip(batch.sentences, self.model.decode(x, batch.mask).tolist()) ] return PerplexityMetric(loss, preds, golds, tgt_mask, not self.args.eval_tgt) @torch.no_grad() def pred_step(self, batch: Batch) -> Batch: src, = batch x = 
self.model(src) tgt = self.model.decode(x, batch.mask) batch.tgt = [[self.TGT.tokenize.decode(cand) for cand in i] for i in tgt.tolist()] return batch @classmethod def build(cls, path, min_freq=2, fix_len=20, **kwargs): r""" Build a brand-new Parser, including initialization of all data fields and model parameters. Args: path (str): The path of the model to be saved. min_freq (str): The minimum frequency needed to include a token in the vocabulary. Default: 2. fix_len (int): The max length of all subword pieces. The excess part of each piece will be truncated. Required if using CharLSTM/BERT. Default: 20. kwargs (dict): A dict holding the unconsumed arguments. """ args = Config(**locals()) os.makedirs(os.path.dirname(path) or './', exist_ok=True) if os.path.exists(path) and not args.build: return cls.load(**args) logger.info("Building the fields") t = TransformerTokenizer(name=args.bart) SRC = Field('src', pad=t.pad, unk=t.unk, bos=t.bos, eos=t.eos, tokenize=t) TGT = Field('tgt', pad=t.pad, unk=t.unk, bos=t.bos, eos=t.eos, tokenize=t) transform = Text(SRC=SRC, TGT=TGT) # share the vocab SRC.vocab = TGT.vocab = t.vocab args.update({ 'n_words': len(SRC.vocab), 'pad_index': SRC.pad_index, 'unk_index': SRC.unk_index, 'bos_index': SRC.bos_index, 'eos_index': SRC.eos_index }) logger.info(f"{transform}") logger.info("Building the model") model = cls.MODEL(**args) logger.info(f"{model}\n") parser = cls(args, model, transform) parser.model.to(parser.device) return parser class Seq2SeqDetector(Seq2SeqParser): NAME = 'seq2seq' MODEL = Seq2SeqDetectModel def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.SRC = self.transform.SRC self.TGT = self.transform.TGT (_, self.TGT_ERROR) = self.transform.TGTERROR def train(self, train: Union[str, Iterable], dev: Union[str, Iterable], test: Union[str, Iterable], epochs: int, patience: int, batch_size: int = 5000, update_steps: int = 1, buckets: int = 32, workers: int = 0, clip: float = 5.0, amp: bool = False, cache: bool = False, verbose: bool = True, **kwargs) -> None: args = self.args.update(locals()) init_logger(logger, verbose=args.verbose) self.transform.train() batch_size = batch_size // update_steps if dist.is_initialized(): batch_size = batch_size // dist.get_world_size() logger.info("Loading the data") if args.cache: if args.bin_path is None: args.bin = os.path.join(os.path.dirname(args.path), 'bin') else: args.bin = args.bin_path train = Dataset(self.transform, args.train, **args).build(batch_size, buckets, True, dist.is_initialized(), workers, chunk_size=args.chunk_size, seed=args.seed) dev = Dataset(self.transform, args.dev, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'train:':6} {train}") if not args.test: logger.info(f"{'dev:':6} {dev}\n") else: test = Dataset(self.transform, args.test, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'dev:':6} {dev}") logger.info(f"{'test:':6} {test}\n") def ged_param(name): if name.startswith("encoder."): return False elif name.startswith("decoder."): return False else: return True no_decay = [] self.optimizer = AdamW([{ 'params': p, 'lr': args.lr * (1 if not ged_param(n) else args.lr_rate), "weight_decay": args.weight_decay if not any(nd in n for nd in no_decay) else 0.0, } for n, p in self.model.named_parameters()], args.lr, (args.mu, args.nu), args.eps, args.weight_decay) self.scheduler = ExponentialLR(self.optimizer, args.decay**(1 / args.decay_steps)) self.scaler = GradScaler(enabled=args.amp) if 
dist.is_initialized(): self.model = DDP(self.model, device_ids=[args.local_rank], find_unused_parameters=args.get( 'find_unused_parameters', True)) if args.amp: self.model.register_comm_hook(dist.group.WORLD, fp16_compress_hook) self.step, self.epoch, self.best_e, self.patience, self.n_batches = 1, 1, 1, patience, len( train.loader) self.best_metric, self.elapsed = Metric(), timedelta() if self.args.checkpoint: try: self.optimizer.load_state_dict( self.checkpoint_state_dict.pop('optimizer_state_dict')) self.scheduler.load_state_dict( self.checkpoint_state_dict.pop('scheduler_state_dict')) self.scaler.load_state_dict( self.checkpoint_state_dict.pop('scaler_state_dict')) set_rng_state(self.checkpoint_state_dict.pop('rng_state')) for k, v in self.checkpoint_state_dict.items(): setattr(self, k, v) train.loader.batch_sampler.epoch = self.epoch except AttributeError: logger.warning( "No checkpoint found. Try re-launching the traing procedure instead" ) for epoch in range(self.epoch, args.epochs + 1): start = datetime.now() bar, metric = progress_bar(train.loader), Metric() logger.info(f"Epoch {epoch} / {args.epochs}:") self.model.train() if self.epoch == 1: torch.cuda.empty_cache() with self.join(): # we should zero `step` as the number of batches in different processes is not necessarily equal self.step = 0 for batch in bar: with self.sync(): with torch.autocast(self.device, enabled=self.args.amp): loss = self.train_step(batch) self.backward(loss) if self.sync_grad: self.clip_grad_norm_(self.model.parameters(), self.args.clip) self.scaler.step(self.optimizer) self.scaler.update() self.scheduler.step() self.optimizer.zero_grad(True) bar.set_postfix_str( f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f}" ) self.step += 1 logger.info(f"{bar.postfix}") self.model.eval() with self.join(), torch.autocast(self.device, enabled=self.args.amp): metric = self.reduce( sum([self.eval_step(i) for i in progress_bar(dev.loader)], Metric())) logger.info(f"{'dev:':5} {metric}") if args.test: test_metric = sum( [self.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {self.reduce(test_metric)}") t = datetime.now() - start self.epoch += 1 self.patience -= 1 self.elapsed += t if metric > self.best_metric: self.best_e, self.patience, self.best_metric = epoch, patience, metric if is_master(): self.save_checkpoint(args.path) logger.info(f"{t}s elapsed (saved)\n") else: logger.info(f"{t}s elapsed\n") if self.patience < 1: break if dist.is_initialized(): dist.barrier() best = self.load(**args) # only allow the master device to save models if is_master(): best.save(args.path) logger.info(f"Epoch {self.best_e} saved") logger.info(f"{'dev:':5} {self.best_metric}") if args.test: best.model.eval() with best.join(): test_metric = sum( [best.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {best.reduce(test_metric)}") logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch") def train_step(self, batch: Batch) -> torch.Tensor: src, tgt, _, src_error, _, tgt_error = batch src_mask, tgt_mask = src.ne(self.args.pad_index), tgt.ne( self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_error, tgt_error, src_mask, tgt_mask) return loss @torch.no_grad() def eval_step(self, batch: Batch) -> PerplexityMetric: src, tgt, _, src_error, tgt_error_raw, tgt_error = batch src_mask, tgt_mask = src.ne(self.args.pad_index), tgt.ne( self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_error, tgt_error, 
src_mask, tgt_mask) def error_label_factorize(errors): return sum( [[(i, i + 1, e) for e in eb.split("::")] for i, eb in enumerate(errors) if eb not in {'CORRECT', NUL}], []) ged_golds = [error_label_factorize(e) for e in tgt_error_raw] ged_preds = [ error_label_factorize( [self.TGT_ERROR.vocab[i] for i in e if i >= 0]) for e in self.model.decode(x, tgt, src_mask, tgt_mask).tolist() ] return SpanMetric(loss, ged_preds, ged_golds) @torch.no_grad() def pred_step(self, batch: Batch) -> Batch: # src, = batch src, tgt = batch src_mask, tgt_mask = src.ne(self.args.pad_index), tgt.ne( self.args.pad_index) x = self.model(src) tgt_charts = self.model.decode_syn(x, tgt, src_mask, tgt_mask) tgt_trees = [s.tgt_tree for s in batch.sentences] batch.tgt = [[ AttachJuxtaposeTree.build(tgt_tree, [(i, j, self.NEW.vocab[label]) for i, j, label in tgt_chart], {UNK, NUL}).pformat(100000000000) ] for tgt_tree, tgt_chart in zip(tgt_trees, tgt_charts)] return batch @classmethod def build(cls, path, min_freq=2, fix_len=20, **kwargs): r""" Build a brand-new Parser, including initialization of all data fields and model parameters. Args: path (str): The path of the model to be saved. min_freq (str): The minimum frequency needed to include a token in the vocabulary. Default: 2. fix_len (int): The max length of all subword pieces. The excess part of each piece will be truncated. Required if using CharLSTM/BERT. Default: 20. kwargs (dict): A dict holding the unconsumed arguments. """ args = Config(**locals()) os.makedirs(os.path.dirname(path) or './', exist_ok=True) if os.path.exists(path) and not args.build: return cls.load(**args) state = torch.load(args.checkpoint_path, map_location='cpu') args = state['args'].update(args) if args.bin_path is None: bin = os.path.join(os.path.dirname(args.path), 'bin') else: bin = args.bin_path fbin = os.path.join(bin, 'transform') + '.pt' if args.cache and os.path.exists(fbin): transform = torch.load(fbin, map_location='cpu') else: transform = state['transform'] t = transform.SRC.tokenize SRC = Field('src', pad=t.pad, unk=t.unk, bos=t.bos, eos=t.eos, tokenize=t) TGT = Field('tgt', pad=t.pad, unk=t.unk, bos=t.bos, eos=t.eos, tokenize=t) SRC_ERROR_RAW = RawField('src_error_raw') SRC_ERROR = Field('src_error') TGT_ERROR_RAW = RawField('tgt_error_raw') TGT_ERROR = Field('tgt_error') transform = Tree(SRC=SRC, TGT=TGT, SRCERROR=(SRC_ERROR_RAW, SRC_ERROR), TGTERROR=(TGT_ERROR_RAW, TGT_ERROR), error_schema=args.error_schema) train = Dataset(transform, args.train, **args) # share the vocab SRC.vocab = TGT.vocab = t.vocab SRC_ERROR = SRC_ERROR.build(train) TGT_ERROR = TGT_ERROR.build(train) SRC_ERROR.vocab = TGT_ERROR.vocab.update(SRC_ERROR.vocab) logger.info(f"{transform}") if args.cache: os.makedirs(bin, exist_ok=True) torch.save(transform, fbin, pickle_module=dill) SRC = transform.SRC (_, TGT_ERROR) = transform.TGTERROR args.update({ 'n_words': len(SRC.vocab), 'n_labels': len(TGT_ERROR.vocab), 'pad_index': SRC.pad_index, 'unk_index': SRC.unk_index, 'bos_index': SRC.bos_index, 'eos_index': SRC.eos_index, 'correct_index': TGT_ERROR.vocab['CORRECT'], }) if "partial" in args.error_schema: args.update({ 'nul_index': TGT_ERROR.vocab[NUL], }) logger.info("Building the model") model = cls.MODEL(**args) if args.gec_init: logger.info("Init the model with gec params") model.load_pretrained(state['pretrained']) model.load_state_dict(state['state_dict'], False) else: logger.info("Original Bart params") logger.info(f"{model}\n") parser = cls(args, model, transform) parser.model.to(parser.device) return 
parser class Seq2seqIntervenedParser(Parser): def __init__(self, args, gec_model, transform): self.gec_model = gec_model self.args = gec_model.args.update(args) self.transform = transform if self.args.lm_alpha > 0: self.lm_model = GPT2LMHeadModel.from_pretrained( self.args.lm_path).to(self.device) self.lm_model.eval() gpt2_tokenizer = AutoTokenizer.from_pretrained(self.args.lm_path) if self.args.lm_path == "IDEA-CCNL/Wenzhong2.0-GPT2-110M-BertTokenizer-chinese":
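The training loops above wrap each step in `torch.autocast` and drive a `GradScaler`, but route the backward pass and gradient clipping through supar helpers (`self.backward`, `self.clip_grad_norm_`). For orientation only, and under the assumption that those helpers reduce to the standard scaler calls, the plain-PyTorch equivalent of one update looks roughly like this:

    import torch
    from torch.cuda.amp import GradScaler

    scaler = GradScaler(enabled=True)

    def update_once(model, optimizer, scheduler, batch, clip=5.0, device="cuda"):
        # Reference sketch of the autocast + GradScaler sequence used in train() above.
        with torch.autocast(device):
            loss = model(batch)                        # stand-in for self.train_step(batch)
        scaler.scale(loss).backward()                  # assumed equivalent of self.backward(loss)
        scaler.unscale_(optimizer)                     # so clipping sees unscaled gradients
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        scaler.step(optimizer)
        scaler.update()
        scheduler.step()
        optimizer.zero_grad(set_to_none=True)
        return loss.detach()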
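`eval_step` in the detector code above turns per-token GED labels into labelled spans with `error_label_factorize` before scoring them with `SpanMetric`. A short hand trace with invented ERRANT-style labels; composite labels are split on `::`, and positions tagged `CORRECT` (or supar's NUL placeholder, written as a plain string here) are dropped.

    def error_label_factorize(errors):
        # Same logic as the closure in eval_step above; 'NUL' stands in for supar's NUL constant.
        return sum([[(i, i + 1, e) for e in eb.split("::")]
                    for i, eb in enumerate(errors) if eb not in {'CORRECT', 'NUL'}], [])

    labels = ["CORRECT", "M:PUNCT", "R:VERB::R:SVA", "CORRECT"]   # hypothetical predictions
    assert error_label_factorize(labels) == [
        (1, 2, "M:PUNCT"),     # single error at position 1
        (2, 3, "R:VERB"),      # the composite label at position 2 ...
        (2, 3, "R:SVA"),       # ... contributes one span per sub-label
    ]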
token_id_map = map_token_ids(transform.SRC.vocab,
1
2023-10-18 10:55:33+00:00
24k
boppreh/hello_tls
src/hello_tls/protocol.py
[ { "identifier": "Protocol", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Protocol(Enum):\n # Keep protocols in order of preference.\n TLS1_3 = b\"\\x03\\x04\"\n TLS1_2 = b\"\\x03\\x03\"\n TLS1_1 = b\"\\x03\\x02\"\n TLS1_0 = b\"\\x03\\x01\"\n SSLv3 = b\"\\x03\\x00\"\n\n def __repr__(self):\n return self.name\n def __lt__(self, other):\n if self.__class__ != other.__class__:\n return NotImplemented\n return self.value < other.value" }, { "identifier": "RecordType", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class RecordType(Enum):\n INVALID = b'\\x00' # Unused in this script.\n CHANGE_CIPHER_SPEC = b'\\x14' # Unused in this script.\n ALERT = b'\\x15'\n HANDSHAKE = b'\\x16'\n APPLICATION_DATA = b'\\x17' # Unused in this script." }, { "identifier": "HandshakeType", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class HandshakeType(Enum):\n client_hello = b'\\x01'\n server_hello = b'\\x02'\n new_session_ticket = b'\\x04'\n end_of_early_data = b'\\x05'\n encrypted_extensions = b'\\x08'\n certificate = b'\\x0B'\n server_key_exchange = b'\\x0C'\n certificate_request = b'\\x0D'\n server_hello_done = b'\\x0E'\n certificate_verify = b'\\x0F'\n finished = b'\\x14'\n certificate_status = b'\\x16'\n key_update = b'\\x18'\n message_hash = b'\\x19'" }, { "identifier": "CompressionMethod", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class CompressionMethod(Enum):\n NULL = b'\\x00'\n DEFLATE = b'\\x01'" }, { "identifier": "CipherSuite", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class CipherSuite(Enum):\n def __repr__(self):\n return self.name\n def __new__(cls, value, *rest, **kwds):\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n # Annotate each cipher suite with the protocols it's supported at.\n # Default to all but TLS 1.3, because that's the most common.\n def __init__(self, _: bytes, protocols: Sequence[Protocol] = (Protocol.SSLv3, Protocol.TLS1_0, Protocol.TLS1_1, Protocol.TLS1_2)):\n self.protocols = protocols\n\n # Pseudo cipher suite, not actually picked.\n #TLS_EMPTY_RENEGOTIATION_INFO_SCSV = b\"\\x00\\xff\"\n\n # TLS 1.3 cipher suites.\n TLS_AES_128_GCM_SHA256 = b\"\\x13\\x01\", (Protocol.TLS1_3,)\n TLS_AES_256_GCM_SHA384 = b\"\\x13\\x02\", (Protocol.TLS1_3,)\n TLS_CHACHA20_POLY1305_SHA256 = b\"\\x13\\x03\", (Protocol.TLS1_3,)\n TLS_AES_128_CCM_SHA256 = b\"\\x13\\x04\", (Protocol.TLS1_3,)\n TLS_AES_128_CCM_8_SHA256 = b\"\\x13\\x05\", (Protocol.TLS1_3,)\n\n # Cipher suite that had its number reassigned.\n OLD_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xcc\\x13'\n \n # Cipher suites adapted from IANA assignments:\n # https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-4\n TLS_AEGIS_128L_SHA256 = b'\\x13\\x07' # [draft-irtf-cfrg-aegis-aead-00]\n TLS_AEGIS_256_SHA384 = b'\\x13\\x06' # [draft-irtf-cfrg-aegis-aead-00]\n TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x19' # [RFC4346]\n TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x17' # [RFC4346][RFC6347]\n TLS_DH_anon_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x1B' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_CBC_SHA = b'\\x00\\x34' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_CBC_SHA256 = b'\\x00\\x6C' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA6' # [RFC5288]\n TLS_DH_anon_WITH_AES_256_CBC_SHA = b'\\x00\\x3A' # [RFC5246]\n TLS_DH_anon_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6D' # [RFC5246]\n TLS_DH_anon_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA7' # [RFC5288]\n TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x46' # [RFC6209]\n 
TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5A' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x47' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5B' # [RFC6209]\n TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x46' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBF' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x84' # [RFC6367]\n TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x89' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC5' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x85' # [RFC6367]\n TLS_DH_anon_WITH_DES_CBC_SHA = b'\\x00\\x1A' # [RFC8996]\n TLS_DH_anon_WITH_RC4_128_MD5 = b'\\x00\\x18' # [RFC5246][RFC6347]\n TLS_DH_anon_WITH_SEED_CBC_SHA = b'\\x00\\x9B' # [RFC4162]\n TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x0B' # [RFC4346]\n TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x0D' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_CBC_SHA = b'\\x00\\x30' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3E' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA4' # [RFC5288]\n TLS_DH_DSS_WITH_AES_256_CBC_SHA = b'\\x00\\x36' # [RFC5246]\n TLS_DH_DSS_WITH_AES_256_CBC_SHA256 = b'\\x00\\x68' # [RFC5246]\n TLS_DH_DSS_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA5' # [RFC5288]\n TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x3E' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x58' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x3F' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x59' # [RFC6209]\n TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x42' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBB' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x82' # [RFC6367]\n TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x85' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC1' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x83' # [RFC6367]\n TLS_DH_DSS_WITH_DES_CBC_SHA = b'\\x00\\x0C' # [RFC8996]\n TLS_DH_DSS_WITH_SEED_CBC_SHA = b'\\x00\\x97' # [RFC4162]\n TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x0E' # [RFC4346]\n TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x10' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x31' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3F' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA0' # [RFC5288]\n TLS_DH_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x37' # [RFC5246]\n TLS_DH_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x69' # [RFC5246]\n TLS_DH_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA1' # [RFC5288]\n TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x40' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x54' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x41' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x55' # [RFC6209]\n TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x43' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBC' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7E' # [RFC6367]\n TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x86' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC2' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7F' # [RFC6367]\n TLS_DH_RSA_WITH_DES_CBC_SHA = b'\\x00\\x0F' # [RFC8996]\n TLS_DH_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x98' # [RFC4162]\n TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x11' # [RFC4346]\n TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x13' # 
[RFC5246]\n TLS_DHE_DSS_WITH_AES_128_CBC_SHA = b'\\x00\\x32' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = b'\\x00\\x40' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA2' # [RFC5288]\n TLS_DHE_DSS_WITH_AES_256_CBC_SHA = b'\\x00\\x38' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6A' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA3' # [RFC5288]\n TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x42' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x56' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x43' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x57' # [RFC6209]\n TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x44' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBD' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x80' # [RFC6367]\n TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x87' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC3' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x81' # [RFC6367]\n TLS_DHE_DSS_WITH_DES_CBC_SHA = b'\\x00\\x12' # [RFC8996]\n TLS_DHE_DSS_WITH_SEED_CBC_SHA = b'\\x00\\x99' # [RFC4162]\n TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x8F' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x90' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xB2' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_128_CCM = b'\\xC0\\xA6' # [RFC6655]\n TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xAA' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x91' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xB3' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_256_CCM = b'\\xC0\\xA7' # [RFC6655]\n TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xAB' # [RFC5487]\n TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x66' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6C' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x67' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6D' # [RFC6209]\n TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x96' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x90' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x97' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x91' # [RFC6367]\n TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAD' # [RFC7905]\n TLS_DHE_PSK_WITH_NULL_SHA = b'\\x00\\x2D' # [RFC4785]\n TLS_DHE_PSK_WITH_NULL_SHA256 = b'\\x00\\xB4' # [RFC5487]\n TLS_DHE_PSK_WITH_NULL_SHA384 = b'\\x00\\xB5' # [RFC5487]\n TLS_DHE_PSK_WITH_RC4_128_SHA = b'\\x00\\x8E' # [RFC4279][RFC6347]\n TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x14' # [RFC4346]\n TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x16' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x33' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x67' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CCM = b'\\xC0\\x9E' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_128_CCM_8 = b'\\xC0\\xA2' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\x9E' # [RFC5288]\n TLS_DHE_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x39' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6B' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_256_CCM = b'\\xC0\\x9F' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_256_CCM_8 = b'\\xC0\\xA3' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\x9F' # [RFC5288]\n TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x44' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x52' # [RFC6209]\n 
TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x45' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x53' # [RFC6209]\n TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x45' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBE' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7C' # [RFC6367]\n TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x88' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC4' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7D' # [RFC6367]\n TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAA' # [RFC7905]\n TLS_DHE_RSA_WITH_DES_CBC_SHA = b'\\x00\\x15' # [RFC8996]\n TLS_DHE_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x9A' # [RFC4162]\n TLS_ECCPWD_WITH_AES_128_CCM_SHA256 = b'\\xC0\\xB2' # [RFC8492]\n TLS_ECCPWD_WITH_AES_128_GCM_SHA256 = b'\\xC0\\xB0' # [RFC8492]\n TLS_ECCPWD_WITH_AES_256_CCM_SHA384 = b'\\xC0\\xB3' # [RFC8492]\n TLS_ECCPWD_WITH_AES_256_GCM_SHA384 = b'\\xC0\\xB1' # [RFC8492]\n TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x17' # [RFC8422]\n TLS_ECDH_anon_WITH_AES_128_CBC_SHA = b'\\xC0\\x18' # [RFC8422]\n TLS_ECDH_anon_WITH_AES_256_CBC_SHA = b'\\xC0\\x19' # [RFC8422]\n TLS_ECDH_anon_WITH_NULL_SHA = b'\\xC0\\x15' # [RFC8422]\n TLS_ECDH_anon_WITH_RC4_128_SHA = b'\\xC0\\x16' # [RFC8422][RFC6347]\n TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x03' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x04' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x25' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2D' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x05' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x26' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x2E' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4A' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5E' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4B' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5F' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x74' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x88' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x75' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x89' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_NULL_SHA = b'\\xC0\\x01' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_RC4_128_SHA = b'\\xC0\\x02' # [RFC8422][RFC6347]\n TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x0D' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x0E' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x29' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x31' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x0F' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x2A' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x32' # [RFC5289]\n TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4E' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x62' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4F' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x63' # [RFC6209]\n TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x78' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8C' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x79' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8D' # [RFC6367]\n 
TLS_ECDH_RSA_WITH_NULL_SHA = b'\\xC0\\x0B' # [RFC8422]\n TLS_ECDH_RSA_WITH_RC4_128_SHA = b'\\xC0\\x0C' # [RFC8422][RFC6347]\n TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x08' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x09' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x23' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_128_CCM = b'\\xC0\\xAC' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 = b'\\xC0\\xAE' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2B' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x0A' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x24' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_256_CCM = b'\\xC0\\xAD' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 = b'\\xC0\\xAF' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x2C' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x48' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5C' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x49' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5D' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x72' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x86' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x73' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x87' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xA9' # [RFC7905]\n TLS_ECDHE_ECDSA_WITH_NULL_SHA = b'\\xC0\\x06' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_RC4_128_SHA = b'\\xC0\\x07' # [RFC8422][RFC6347]\n TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x34' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = b'\\xC0\\x35' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x37' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256 = b'\\xD0\\x03' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256 = b'\\xD0\\x05' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256 = b'\\xD0\\x01' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = b'\\xC0\\x36' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x38' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384 = b'\\xD0\\x02' # [RFC8442]\n TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x70' # [RFC6209]\n TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x71' # [RFC6209]\n TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x9A' # [RFC6367]\n TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x9B' # [RFC6367]\n TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAC' # [RFC7905]\n TLS_ECDHE_PSK_WITH_NULL_SHA = b'\\xC0\\x39' # [RFC5489]\n TLS_ECDHE_PSK_WITH_NULL_SHA256 = b'\\xC0\\x3A' # [RFC5489]\n TLS_ECDHE_PSK_WITH_NULL_SHA384 = b'\\xC0\\x3B' # [RFC5489]\n TLS_ECDHE_PSK_WITH_RC4_128_SHA = b'\\xC0\\x33' # [RFC5489][RFC6347]\n TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x12' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x13' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x27' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2F' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x14' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x28' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x30' # [RFC5289]\n TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4C' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x60' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4D' # [RFC6209]\n 
TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x61' # [RFC6209]\n TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x76' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8A' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x77' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8B' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xA8' # [RFC7905]\n TLS_ECDHE_RSA_WITH_NULL_SHA = b'\\xC0\\x10' # [RFC8422]\n TLS_ECDHE_RSA_WITH_RC4_128_SHA = b'\\xC0\\x11' # [RFC8422][RFC6347]\n TLS_GOSTR341112_256_WITH_28147_CNT_IMIT = b'\\xC1\\x02' # [RFC9189]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_CTR_OMAC = b'\\xC1\\x00' # [RFC9189]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_MGM_L = b'\\xC1\\x03' # [RFC9367]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_MGM_S = b'\\xC1\\x05' # [RFC9367]\n TLS_GOSTR341112_256_WITH_MAGMA_CTR_OMAC = b'\\xC1\\x01' # [RFC9189]\n TLS_GOSTR341112_256_WITH_MAGMA_MGM_L = b'\\xC1\\x04' # [RFC9367]\n TLS_GOSTR341112_256_WITH_MAGMA_MGM_S = b'\\xC1\\x06' # [RFC9367]\n TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 = b'\\x00\\x29' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA = b'\\x00\\x26' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 = b'\\x00\\x2A' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA = b'\\x00\\x27' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x2B' # [RFC2712][RFC6347]\n TLS_KRB5_EXPORT_WITH_RC4_40_SHA = b'\\x00\\x28' # [RFC2712][RFC6347]\n TLS_KRB5_WITH_3DES_EDE_CBC_MD5 = b'\\x00\\x23' # [RFC2712]\n TLS_KRB5_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x1F' # [RFC2712]\n TLS_KRB5_WITH_DES_CBC_MD5 = b'\\x00\\x22' # [RFC2712]\n TLS_KRB5_WITH_DES_CBC_SHA = b'\\x00\\x1E' # [RFC2712]\n TLS_KRB5_WITH_IDEA_CBC_MD5 = b'\\x00\\x25' # [RFC2712]\n TLS_KRB5_WITH_IDEA_CBC_SHA = b'\\x00\\x21' # [RFC2712]\n TLS_KRB5_WITH_RC4_128_MD5 = b'\\x00\\x24' # [RFC2712][RFC6347]\n TLS_KRB5_WITH_RC4_128_SHA = b'\\x00\\x20' # [RFC2712][RFC6347]\n TLS_NULL_WITH_NULL_NULL = b'\\x00\\x00' # [RFC5246]\n TLS_PSK_DHE_WITH_AES_128_CCM_8 = b'\\xC0\\xAA' # [RFC6655]\n TLS_PSK_DHE_WITH_AES_256_CCM_8 = b'\\xC0\\xAB' # [RFC6655]\n TLS_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x8B' # [RFC4279]\n TLS_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x8C' # [RFC4279]\n TLS_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xAE' # [RFC5487]\n TLS_PSK_WITH_AES_128_CCM = b'\\xC0\\xA4' # [RFC6655]\n TLS_PSK_WITH_AES_128_CCM_8 = b'\\xC0\\xA8' # [RFC6655]\n TLS_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA8' # [RFC5487]\n TLS_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x8D' # [RFC4279]\n TLS_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xAF' # [RFC5487]\n TLS_PSK_WITH_AES_256_CCM = b'\\xC0\\xA5' # [RFC6655]\n TLS_PSK_WITH_AES_256_CCM_8 = b'\\xC0\\xA9' # [RFC6655]\n TLS_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA9' # [RFC5487]\n TLS_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x64' # [RFC6209]\n TLS_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6A' # [RFC6209]\n TLS_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x65' # [RFC6209]\n TLS_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6B' # [RFC6209]\n TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x94' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8E' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x95' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8F' # [RFC6367]\n TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAB' # [RFC7905]\n TLS_PSK_WITH_NULL_SHA = b'\\x00\\x2C' # [RFC4785]\n TLS_PSK_WITH_NULL_SHA256 = b'\\x00\\xB0' # [RFC5487]\n TLS_PSK_WITH_NULL_SHA384 = b'\\x00\\xB1' # [RFC5487]\n TLS_PSK_WITH_RC4_128_SHA = 
b'\\x00\\x8A' # [RFC4279][RFC6347]\n TLS_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x08' # [RFC4346]\n TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 = b'\\x00\\x06' # [RFC4346]\n TLS_RSA_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x03' # [RFC4346][RFC6347]\n TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x93' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x94' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xB6' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xAC' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x95' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xB7' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xAD' # [RFC5487]\n TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x68' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6E' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x69' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6F' # [RFC6209]\n TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x98' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x92' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x99' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x93' # [RFC6367]\n TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAE' # [RFC7905]\n TLS_RSA_PSK_WITH_NULL_SHA = b'\\x00\\x2E' # [RFC4785]\n TLS_RSA_PSK_WITH_NULL_SHA256 = b'\\x00\\xB8' # [RFC5487]\n TLS_RSA_PSK_WITH_NULL_SHA384 = b'\\x00\\xB9' # [RFC5487]\n TLS_RSA_PSK_WITH_RC4_128_SHA = b'\\x00\\x92' # [RFC4279][RFC6347]\n TLS_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x0A' # [RFC5246]\n TLS_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x2F' # [RFC5246]\n TLS_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3C' # [RFC5246]\n TLS_RSA_WITH_AES_128_CCM = b'\\xC0\\x9C' # [RFC6655]\n TLS_RSA_WITH_AES_128_CCM_8 = b'\\xC0\\xA0' # [RFC6655]\n TLS_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\x9C' # [RFC5288]\n TLS_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x35' # [RFC5246]\n TLS_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x3D' # [RFC5246]\n TLS_RSA_WITH_AES_256_CCM = b'\\xC0\\x9D' # [RFC6655]\n TLS_RSA_WITH_AES_256_CCM_8 = b'\\xC0\\xA1' # [RFC6655]\n TLS_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\x9D' # [RFC5288]\n TLS_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x3C' # [RFC6209]\n TLS_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x50' # [RFC6209]\n TLS_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x3D' # [RFC6209]\n TLS_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x51' # [RFC6209]\n TLS_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x41' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBA' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7A' # [RFC6367]\n TLS_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x84' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC0' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7B' # [RFC6367]\n TLS_RSA_WITH_DES_CBC_SHA = b'\\x00\\x09' # [RFC8996]\n TLS_RSA_WITH_IDEA_CBC_SHA = b'\\x00\\x07' # [RFC8996]\n TLS_RSA_WITH_NULL_MD5 = b'\\x00\\x01' # [RFC5246]\n TLS_RSA_WITH_NULL_SHA = b'\\x00\\x02' # [RFC5246]\n TLS_RSA_WITH_NULL_SHA256 = b'\\x00\\x3B' # [RFC5246]\n TLS_RSA_WITH_RC4_128_MD5 = b'\\x00\\x04' # [RFC5246][RFC6347]\n TLS_RSA_WITH_RC4_128_SHA = b'\\x00\\x05' # [RFC5246][RFC6347]\n TLS_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x96' # [RFC4162]\n TLS_SHA256_SHA256 = b'\\xC0\\xB4' # [RFC9150]\n TLS_SHA384_SHA384 = b'\\xC0\\xB5' # [RFC9150]\n TLS_SM4_CCM_SM3 = b'\\x00\\xC7' # [RFC8998]\n TLS_SM4_GCM_SM3 = b'\\x00\\xC6' # [RFC8998]\n TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1C' # 
[RFC5054]\n TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA = b'\\xC0\\x1F' # [RFC5054]\n TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA = b'\\xC0\\x22' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1B' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x1E' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x21' # [RFC5054]\n TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1A' # [RFC5054]\n TLS_SRP_SHA_WITH_AES_128_CBC_SHA = b'\\xC0\\x1D' # [RFC5054]\n TLS_SRP_SHA_WITH_AES_256_CBC_SHA = b'\\xC0\\x20' # [RFC5054]" }, { "identifier": "ExtensionType", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class ExtensionType(Enum):\n server_name = b'\\x00\\x00'\n max_fragment_length = b'\\x00\\x01'\n client_certificate_url = b'\\x00\\x02'\n trusted_ca_keys = b'\\x00\\x03'\n truncated_hmac = b'\\x00\\x04'\n status_request = b'\\x00\\x05'\n user_mapping = b'\\x00\\x06'\n client_authz = b'\\x00\\x07'\n server_authz = b'\\x00\\x08'\n cert_type = b'\\x00\\x09'\n supported_groups = b'\\x00\\x0a'\n ec_point_formats = b'\\x00\\x0b'\n srp = b'\\x00\\x0c'\n signature_algorithms = b'\\x00\\x0d'\n use_srtp = b'\\x00\\x0e'\n heartbeat = b'\\x00\\x0f'\n application_layer_protocol_negotiation = b'\\x00\\x10'\n status_request_v2 = b'\\x00\\x11'\n signed_certificate_timestamp = b'\\x00\\x12'\n client_certificate_type = b'\\x00\\x13'\n server_certificate_type = b'\\x00\\x14'\n padding = b'\\x00\\x15'\n encrypt_then_mac = b'\\x00\\x16'\n extended_master_secret = b'\\x00\\x17'\n token_binding = b'\\x00\\x18'\n cached_info = b'\\x00\\x19'\n tls_lts = b'\\x00\\x1a'\n compress_certificate = b'\\x00\\x1b'\n record_size_limit = b'\\x00\\x1c'\n pwd_protect = b'\\x00\\x1d'\n pwd_clear = b'\\x00\\x1e'\n password_salt = b'\\x00\\x1f'\n ticket_pinning = b'\\x00\\x20'\n tls_cert_with_extern_psk = b'\\x00\\x21'\n delegated_credential = b'\\x00\\x22'\n session_ticket = b'\\x00\\x23'\n TLMSP = b'\\x00\\x24'\n TLMSP_proxying = b'\\x00\\x25'\n TLMSP_delegate = b'\\x00\\x26'\n supported_ekt_ciphers = b'\\x00\\x27'\n pre_shared_key = b'\\x00\\x29'\n early_data = b'\\x00\\x2a'\n supported_versions = b'\\x00\\x2b'\n cookie = b'\\x00\\x2c'\n psk_key_exchange_modes = b'\\x00\\x2d'\n certificate_authorities = b'\\x00\\x2f'\n oid_filters = b'\\x00\\x30'\n post_handshake_auth = b'\\x00\\x31'\n signature_algorithms_cert = b'\\x00\\x32'\n key_share = b'\\x00\\x33'\n transparency_info = b'\\x00\\x34'\n connection_id_deprecated = b'\\x00\\x35'\n connection_id = b'\\x00\\x36'\n external_id_hash = b'\\x00\\x37'\n external_session_id = b'\\x00\\x38'\n quic_transport_parameters = b'\\x00\\x39'\n ticket_request = b'\\x00\\x3a'\n dnssec_chain = b'\\x00\\x3b'\n sequence_number_encryption_algorithms = b'\\x00\\x3c'" }, { "identifier": "Group", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Group(Enum):\n def __new__(cls, value, *rest, **kwds):\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n # Annotate each group with whether it's a PQ group.\n def __init__(self, _: bytes, is_pq: bool = False):\n self.is_pq = is_pq\n def __repr__(self):\n return self.name\n \n sect163k1 = b'\\x00\\x01'\n sect163r1 = b'\\x00\\x02'\n sect163r2 = b'\\x00\\x03'\n sect193r1 = b'\\x00\\x04'\n sect193r2 = b'\\x00\\x05'\n sect233k1 = b'\\x00\\x06'\n sect233r1 = b'\\x00\\x07'\n sect239k1 = b'\\x00\\x08'\n sect283k1 = b'\\x00\\x09'\n sect283r1 = b'\\x00\\x0a'\n sect409k1 = b'\\x00\\x0b'\n sect409r1 = b'\\x00\\x0c'\n sect571k1 = b'\\x00\\x0d'\n sect571r1 = b'\\x00\\x0e'\n secp160k1 = b'\\x00\\x0f'\n secp160r1 = 
b'\\x00\\x10'\n secp160r2 = b'\\x00\\x11'\n secp192k1 = b'\\x00\\x12'\n secp192r1 = b'\\x00\\x13'\n secp224k1 = b'\\x00\\x14'\n secp224r1 = b'\\x00\\x15'\n secp256k1 = b'\\x00\\x16'\n secp256r1 = b'\\x00\\x17'\n secp384r1 = b'\\x00\\x18'\n secp521r1 = b'\\x00\\x19'\n brainpoolP256r1 = b'\\x00\\x1a'\n brainpoolP384r1 = b'\\x00\\x1b'\n brainpoolP512r1 = b'\\x00\\x1c'\n x25519 = b'\\x00\\x1d'\n x448 = b'\\x00\\x1e'\n brainpoolP256r1tls13 = b'\\x00\\x1f'\n brainpoolP384r1tls13 = b'\\x00\\x20'\n brainpoolP512r1tls13 = b'\\x00\\x21'\n GC256A = b'\\x00\\x22'\n GC256B = b'\\x00\\x23'\n GC256C = b'\\x00\\x24'\n GC256D = b'\\x00\\x25'\n GC512A = b'\\x00\\x26'\n GC512B = b'\\x00\\x27'\n GC512C = b'\\x00\\x28'\n curveSM2 = b'\\x00\\x29'\n ffdhe2048 = b'\\x01\\x00'\n ffdhe3072 = b'\\x01\\x01'\n ffdhe4096 = b'\\x01\\x02'\n ffdhe6144 = b'\\x01\\x03'\n ffdhe8192 = b'\\x01\\x04'\n arbitrary_explicit_prime_curves = b'\\xff\\x01'\n arbitrary_explicit_char2_curves = b'\\xff\\x02'\n\n # Somewhat common post-quantum groups, not yet standardized:\n X25519Kyber768Draft00 = b'\\x63\\x99', True\n X25519Kyber768Draft00_obsolete = b'\\xfe\\x31', True\n X25519Kyber512Draft00 = b'\\xfe\\x30', True\n SecP256r1Kyber768Draft00 = b'\\x63\\x9a', True\n\n # Long list of unusual post-quantum groups from liboqs:\n # https://github.com/open-quantum-safe/oqs-provider/blob/main/ALGORITHMS.md?plain=1#L13\n frodo640aes = b'\\x02\\x00', True\n p256_frodo640aes = b'\\x2F\\x00', True\n x25519_frodo640aes = b'\\x2F\\x80', True\n frodo640shake = b'\\x02\\x01', True\n p256_frodo640shake = b'\\x2F\\x01', True\n x25519_frodo640shake = b'\\x2F\\x81', True\n frodo976aes = b'\\x02\\x02', True\n p384_frodo976aes = b'\\x2F\\x02', True\n x448_frodo976aes = b'\\x2F\\x82', True\n frodo976shake = b'\\x02\\x03', True\n p384_frodo976shake = b'\\x2F\\x03', True\n x448_frodo976shake = b'\\x2F\\x83', True\n frodo1344aes = b'\\x02\\x04', True\n p521_frodo1344aes = b'\\x2F\\x04', True\n frodo1344shake = b'\\x02\\x05', True\n p521_frodo1344shake = b'\\x2F\\x05', True\n kyber512 = b'\\x02\\x3A', True\n p256_kyber512 = b'\\x2F\\x3A', True\n x25519_kyber512 = b'\\x2F\\x39', True\n kyber768 = b'\\x02\\x3C', True\n p384_kyber768 = b'\\x2F\\x3C', True\n x448_kyber768 = b'\\x2F\\x90', True\n kyber1024 = b'\\x02\\x3D', True\n p521_kyber1024 = b'\\x2F\\x3D', True\n bikel1 = b'\\x02\\x41', True\n p256_bikel1 = b'\\x2F\\x41', True\n x25519_bikel1 = b'\\x2F\\xAE', True\n bikel3 = b'\\x02\\x42', True\n p384_bikel3 = b'\\x2F\\x42', True\n x448_bikel3 = b'\\x2F\\xAF', True\n bikel5 = b'\\x02\\x43', True\n p521_bikel5 = b'\\x2F\\x43', True\n hqc128 = b'\\x02\\x2C', True\n p256_hqc128 = b'\\x2F\\x2C', True\n x25519_hqc128 = b'\\x2F\\xAC', True\n hqc192 = b'\\x02\\x2D', True\n p384_hqc192 = b'\\x2F\\x2D', True\n x448_hqc192 = b'\\x2F\\xAD', True\n hqc256 = b'\\x02\\x2E', True\n p521_hqc256 = b'\\x2F\\x2E', True\n dilithium2 = b'\\xfe\\xa0', True\n p256_dilithium2 = b'\\xfe\\xa1', True\n rsa3072_dilithium2 = b'\\xfe\\xa2', True\n dilithium3 = b'\\xfe\\xa3', True\n p384_dilithium3 = b'\\xfe\\xa4', True\n dilithium5 = b'\\xfe\\xa5', True\n p521_dilithium5 = b'\\xfe\\xa6', True\n falcon512 = b'\\xfe\\xae', True\n p256_falcon512 = b'\\xfe\\xaf', True\n rsa3072_falcon512 = b'\\xfe\\xb0', True\n falcon1024 = b'\\xfe\\xb1', True\n p521_falcon1024 = b'\\xfe\\xb2', True\n sphincssha2128fsimple = b'\\xfe\\xb3', True\n p256_sphincssha2128fsimple = b'\\xfe\\xb4', True\n rsa3072_sphincssha2128fsimple = b'\\xfe\\xb5', True\n sphincssha2128ssimple = b'\\xfe\\xb6', True\n 
p256_sphincssha2128ssimple = b'\\xfe\\xb7', True\n rsa3072_sphincssha2128ssimple = b'\\xfe\\xb8', True\n sphincssha2192fsimple = b'\\xfe\\xb9', True\n p384_sphincssha2192fsimple = b'\\xfe\\xba', True\n sphincssha2192ssimple = b'\\xfe\\xbb', True\n p384_sphincssha2192ssimple = b'\\xfe\\xbc', True\n sphincssha2256fsimple = b'\\xfe\\xbd', True\n p521_sphincssha2256fsimple = b'\\xfe\\xbe', True\n sphincssha2256ssimple = b'\\xfe\\xc0', True\n p521_sphincssha2256ssimple = b'\\xfe\\xc1', True\n sphincsshake128fsimple = b'\\xfe\\xc2', True\n p256_sphincsshake128fsimple = b'\\xfe\\xc3', True\n rsa3072_sphincsshake128fsimple = b'\\xfe\\xc4', True\n sphincsshake128ssimple = b'\\xfe\\xc5', True\n p256_sphincsshake128ssimple = b'\\xfe\\xc6', True\n rsa3072_sphincsshake128ssimple = b'\\xfe\\xc7', True\n sphincsshake192fsimple = b'\\xfe\\xc8', True\n p384_sphincsshake192fsimple = b'\\xfe\\xc9', True\n sphincsshake192ssimple = b'\\xfe\\xca', True\n p384_sphincsshake192ssimple = b'\\xfe\\xcb', True\n sphincsshake256fsimple = b'\\xfe\\xcc', True\n p521_sphincsshake256fsimple = b'\\xfe\\xcd', True\n sphincsshake256ssimple = b'\\xfe\\xce', True\n p521_sphincsshake256ssimple = b'\\xfe\\xcf', True" }, { "identifier": "AlertLevel", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class AlertLevel(Enum):\n \"\"\" Different alert levels that can be sent by the server. \"\"\"\n WARNING = b'\\x01'\n FATAL = b'\\x02'" }, { "identifier": "AlertDescription", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class AlertDescription(Enum):\n \"\"\" Different alert messages that can be sent by the server. \"\"\"\n close_notify = b'\\x00'\n unexpected_message = b'\\x0a'\n bad_record_mac = b'\\x14'\n record_overflow = b'\\x16'\n handshake_failure = b'\\x28'\n bad_certificate = b'\\x2a'\n unsupported_certificate = b'\\x2b'\n certificate_revoked = b'\\x2c'\n certificate_expired = b'\\x2d'\n certificate_unknown = b'\\x2e'\n illegal_parameter = b'\\x2f'\n unknown_ca = b'\\x30'\n access_denied = b'\\x31'\n decode_error = b'\\x32'\n decrypt_error = b'\\x33'\n protocol_version = b'\\x46'\n insufficient_security = b'\\x47'\n internal_error = b'\\x50'\n inappropriate_fallback = b'\\x56'\n user_canceled = b'\\x5a'\n missing_extension = b'\\x6d'\n unsupported_extension = b'\\x6e'\n unrecognized_name = b'\\x70'\n bad_certificate_status_response = b'\\x71'\n unknown_psk_identity = b'\\x73'\n certificate_required = b'\\x74'\n no_application_protocol = b'\\x78'" }, { "identifier": "PskKeyExchangeMode", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class PskKeyExchangeMode(Enum):\n psk_ke = b'\\x00'\n psk_dhe_ke = b'\\x01'" } ]
from typing import Iterator, List, Sequence, Optional, Iterable, Callable, Tuple
from contextlib import contextmanager
from dataclasses import dataclass
from .names_and_numbers import Protocol, RecordType, HandshakeType, CompressionMethod, CipherSuite, ExtensionType, Group, AlertLevel, AlertDescription, PskKeyExchangeMode
import logging
15185
logger = logging.getLogger(__name__)

class ScanError(Exception):
    """ Base error class for errors that occur during scanning. """
    pass

class ServerAlertError(ScanError):
    def __init__(self, level: AlertLevel, description: AlertDescription):
        super().__init__(self, f'Server error: {level}: {description}')
        self.level = level
        self.description = description

class BadServerResponse(ScanError):
    """ Error for server responses that can't be parsed. """
    pass

@dataclass
class ServerHello:
    version: Protocol
    compression: CompressionMethod
    cipher_suite: CipherSuite
    group: Optional[Group]

def _make_stream_parser(packets: Iterable[bytes]) -> Tuple[Callable[[int], bytes], Callable[[], int]]:
    """ Returns helper functions to parse a stream of packets. """
    start = 0
    packets_iter = iter(packets)
    data = b''
    def read_next(length: int) -> bytes:
        nonlocal start, data
        while start + length > len(data):
            try:
                data += next(packets_iter)
            except StopIteration:
                raise BadServerResponse('Server response ended unexpectedly')
        value = data[start:start+length]
        start += length
        return value
    return read_next, lambda: start

def _bytes_to_int(b: bytes) -> int:
    return int.from_bytes(b, byteorder='big')

def parse_server_hello(packets: Iterable[bytes]) -> ServerHello:
    """ Parses a Server Hello packet and returns the cipher suite accepted by the server. """
    read_next, current_position = _make_stream_parser(packets)
    record_type = RecordType(read_next(1))
    legacy_record_version = read_next(2)
    record_length = _bytes_to_int(read_next(2))
    record_end = current_position() + record_length
    if record_type == RecordType.ALERT:
        # Server responded with an error.
        alert_level = AlertLevel(read_next(1))
        alert_description = AlertDescription(read_next(1))
        raise ServerAlertError(alert_level, alert_description)
    assert record_type == RecordType.HANDSHAKE, record_type
    handshake_type = HandshakeType(read_next(1))
    assert handshake_type == HandshakeType.server_hello, handshake_type
    server_hello_length = _bytes_to_int(read_next(3))
    # At most TLS 1.2. Handshakes for TLS 1.3 use the supported_versions extension.
    version = Protocol(read_next(2))
    server_random = read_next(32)
    session_id_length = read_next(1)
    session_id = read_next(_bytes_to_int(session_id_length))
    cipher_suite = CipherSuite(read_next(2))
    compression_method = CompressionMethod(read_next(1))
    extensions_length = _bytes_to_int(read_next(2))
    extensions_end = current_position() + extensions_length
    group = None
    while current_position() < extensions_end:
logger = logging.getLogger(__name__)

class ScanError(Exception):
    """ Base error class for errors that occur during scanning. """
    pass

class ServerAlertError(ScanError):
    def __init__(self, level: AlertLevel, description: AlertDescription):
        super().__init__(self, f'Server error: {level}: {description}')
        self.level = level
        self.description = description

class BadServerResponse(ScanError):
    """ Error for server responses that can't be parsed. """
    pass

@dataclass
class ServerHello:
    version: Protocol
    compression: CompressionMethod
    cipher_suite: CipherSuite
    group: Optional[Group]

def _make_stream_parser(packets: Iterable[bytes]) -> Tuple[Callable[[int], bytes], Callable[[], int]]:
    """ Returns helper functions to parse a stream of packets. """
    start = 0
    packets_iter = iter(packets)
    data = b''
    def read_next(length: int) -> bytes:
        nonlocal start, data
        while start + length > len(data):
            try:
                data += next(packets_iter)
            except StopIteration:
                raise BadServerResponse('Server response ended unexpectedly')
        value = data[start:start+length]
        start += length
        return value
    return read_next, lambda: start

def _bytes_to_int(b: bytes) -> int:
    return int.from_bytes(b, byteorder='big')

def parse_server_hello(packets: Iterable[bytes]) -> ServerHello:
    """ Parses a Server Hello packet and returns the cipher suite accepted by the server. """
    read_next, current_position = _make_stream_parser(packets)
    record_type = RecordType(read_next(1))
    legacy_record_version = read_next(2)
    record_length = _bytes_to_int(read_next(2))
    record_end = current_position() + record_length
    if record_type == RecordType.ALERT:
        # Server responded with an error.
        alert_level = AlertLevel(read_next(1))
        alert_description = AlertDescription(read_next(1))
        raise ServerAlertError(alert_level, alert_description)
    assert record_type == RecordType.HANDSHAKE, record_type
    handshake_type = HandshakeType(read_next(1))
    assert handshake_type == HandshakeType.server_hello, handshake_type
    server_hello_length = _bytes_to_int(read_next(3))
    # At most TLS 1.2. Handshakes for TLS 1.3 use the supported_versions extension.
    version = Protocol(read_next(2))
    server_random = read_next(32)
    session_id_length = read_next(1)
    session_id = read_next(_bytes_to_int(session_id_length))
    cipher_suite = CipherSuite(read_next(2))
    compression_method = CompressionMethod(read_next(1))
    extensions_length = _bytes_to_int(read_next(2))
    extensions_end = current_position() + extensions_length
    group = None
    while current_position() < extensions_end:
extension_type = ExtensionType(read_next(2))
5
2023-10-21 02:00:13+00:00
24k
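Taken together, the fields of the record above form one retrieval-augmented next-line completion example: context holds the candidate cross-file snippets, import_statement and cropped_code give the in-file prefix, next_line is the ground-truth continuation, and gold_snippet_index (5 here, i.e. the ExtensionType entry of context) marks the snippet the target line actually uses. The sketch below is a minimal, hypothetical scoring harness for such a record; the score_record helper, the generate callback, and the toy record are assumptions made for illustration, not part of the dataset or its official evaluation code.

def score_record(record: dict, generate) -> bool:
    """Exact-match scoring for one next-line completion record (illustrative only).

    `record` mirrors a row of this dataset (context, import_statement,
    cropped_code, next_line, gold_snippet_index); `generate` is any callable
    mapping a prompt string to a predicted line of code. Both are assumptions
    about a surrounding harness, not something defined by the dataset itself.
    """
    # Use only the gold cross-file snippet, i.e. context[gold_snippet_index].
    gold = record["context"][record["gold_snippet_index"]]
    prompt = (
        f"# {gold['path']}\n{gold['snippet']}\n\n"
        + record["import_statement"] + "\n"
        + record["cropped_code"]
    )
    prediction = generate(prompt)
    # The target is a single line, so compare after stripping surrounding whitespace.
    return prediction.strip() == record["next_line"].strip()

if __name__ == "__main__":
    # Tiny inline record mirroring the fields above (values shortened for brevity).
    toy = {
        "context": [{"identifier": "ExtensionType",
                     "path": "src/hello_tls/names_and_numbers.py",
                     "snippet": "class ExtensionType(Enum):\n    supported_versions = b'\\x00\\x2b'"}],
        "gold_snippet_index": 0,
        "import_statement": "from .names_and_numbers import ExtensionType",
        "cropped_code": "while current_position() < extensions_end:",
        "next_line": "extension_type = ExtensionType(read_next(2))",
    }
    # Stub "model" that always returns the gold line, so this prints True.
    print(score_record(toy, lambda prompt: toy["next_line"]))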
zhaojw1998/AccoMontage-3
arrangement_utils.py
[ { "identifier": "split_phrases", "path": "piano_arranger/acc_utils.py", "snippet": "def split_phrases(segmentation):\n \"\"\"Split a phrase label string into individual phrase meta info\"\"\"\n if '\\n' not in segmentation:\n segmentation += '\\n'\n phrases = []\n lengths = []\n current = 0\n while segmentation[current] != '\\n':\n if segmentation[current].isalpha():\n j = 1\n while not (segmentation[current + j].isalpha() or segmentation[current + j] == '\\n'):\n j += 1\n phrases.append(segmentation[current])\n lengths.append(int(segmentation[current+1: current+j]))\n current += j\n return [(phrases[i], lengths[i], sum(lengths[:i])) for i in range(len(phrases))] " }, { "identifier": "DisentangleVAE", "path": "piano_arranger/models/Poly_Dis.py", "snippet": "class DisentangleVAE(PytorchModel):\n\n def __init__(self, name, device, chd_encoder, rhy_encoder, decoder,\n chd_decoder):\n super(DisentangleVAE, self).__init__(name, device)\n self.chd_encoder = chd_encoder\n self.rhy_encoder = rhy_encoder\n self.decoder = decoder\n self.num_step = self.decoder.num_step\n self.chd_decoder = chd_decoder\n\n def confuse_prmat(self, pr_mat):\n non_zero_ent = torch.nonzero(pr_mat.long())\n eps = torch.randint(0, 2, (non_zero_ent.size(0),))\n eps = ((2 * eps) - 1).long()\n confuse_ent = torch.clamp(non_zero_ent[:, 2] + eps, min=0, max=127)\n pr_mat[non_zero_ent[:, 0], non_zero_ent[:, 1], confuse_ent] = \\\n pr_mat[non_zero_ent[:, 0], non_zero_ent[:, 1], non_zero_ent[:, 2]]\n return pr_mat\n\n def get_chroma(self, pr_mat):\n bs = pr_mat.size(0)\n pad = torch.zeros(bs, 32, 4).to(self.device)\n pr_mat = torch.cat([pr_mat, pad], dim=-1)\n c = pr_mat.view(bs, 32, -1, 12).contiguous()\n c = c.sum(dim=-2) # (bs, 32, 12)\n c = c.view(bs, 8, 4, 12)\n c = c.sum(dim=-2).float()\n c = torch.log(c + 1)\n return c.to(self.device)\n\n def run(self, x, c, pr_mat, tfr1, tfr2, tfr3, confuse=True):\n embedded_x, lengths = self.decoder.emb_x(x)\n # cc = self.get_chroma(pr_mat)\n dist_chd = self.chd_encoder(c)\n # pr_mat = self.confuse_prmat(pr_mat)\n dist_rhy = self.rhy_encoder(pr_mat)\n z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)\n dec_z = torch.cat([z_chd, z_rhy], dim=-1)\n pitch_outs, dur_outs = self.decoder(dec_z, False, embedded_x,\n lengths, tfr1, tfr2)\n recon_root, recon_chroma, recon_bass = self.chd_decoder(z_chd, False,\n tfr3, c)\n return pitch_outs, dur_outs, dist_chd, dist_rhy, recon_root, \\\n recon_chroma, recon_bass\n\n def loss_function(self, x, c, recon_pitch, recon_dur, dist_chd,\n dist_rhy, recon_root, recon_chroma, recon_bass,\n beta, weights, weighted_dur=False):\n recon_loss, pl, dl = self.decoder.recon_loss(x, recon_pitch, recon_dur,\n weights, weighted_dur)\n kl_loss, kl_chd, kl_rhy = self.kl_loss(dist_chd, dist_rhy)\n chord_loss, root, chroma, bass = self.chord_loss(c, recon_root,\n recon_chroma,\n recon_bass)\n loss = recon_loss + beta * kl_loss + chord_loss\n return loss, recon_loss, pl, dl, kl_loss, kl_chd, kl_rhy, chord_loss, \\\n root, chroma, bass\n\n def chord_loss(self, c, recon_root, recon_chroma, recon_bass):\n loss_fun = nn.CrossEntropyLoss()\n root = c[:, :, 0: 12].max(-1)[-1].view(-1).contiguous()\n chroma = c[:, :, 12: 24].long().view(-1).contiguous()\n bass = c[:, :, 24:].max(-1)[-1].view(-1).contiguous()\n\n recon_root = recon_root.view(-1, 12).contiguous()\n recon_chroma = recon_chroma.view(-1, 2).contiguous()\n recon_bass = recon_bass.view(-1, 12).contiguous()\n root_loss = loss_fun(recon_root, root)\n chroma_loss = loss_fun(recon_chroma, chroma)\n bass_loss = 
loss_fun(recon_bass, bass)\n chord_loss = root_loss + chroma_loss + bass_loss\n return chord_loss, root_loss, chroma_loss, bass_loss\n\n def kl_loss(self, *dists):\n # kl = kl_with_normal(dists[0])\n kl_chd = kl_with_normal(dists[0])\n kl_rhy = kl_with_normal(dists[1])\n kl_loss = kl_chd + kl_rhy\n return kl_loss, kl_chd, kl_rhy\n\n def loss(self, x, c, pr_mat, dt_x, tfr1=0., tfr2=0., tfr3=0., beta=0.1, weights=(1, 0.5)):\n #print(pr_mat.shape, dt_x.shape)\n outputs = self.run(x, c, pr_mat, tfr1, tfr2, tfr3)\n loss = self.loss_function(x, c, *outputs, beta, weights)\n return loss\n\n # def inference(self, c, pr_mat):\n # self.eval()\n # with torch.no_grad():\n # dist_chd = self.chd_encoder(c)\n # # pr_mat = self.confuse_prmat(pr_mat)\n # dist_rhy = self.rhy_encoder(pr_mat)\n # z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)\n # dec_z = torch.cat([z_chd, z_rhy], dim=-1)\n # pitch_outs, dur_outs = self.decoder(dec_z, True, None,\n # None, 0., 0.)\n # est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)\n # return est_x\n #\n # def swap(self, c1, c2, pr_mat1, pr_mat2, fix_rhy, fix_chd):\n # pr_mat = pr_mat1 if fix_rhy else pr_mat2\n # c = c1 if fix_chd else c2\n # est_x = self.inference(c, pr_mat)\n # return est_x\n\n def inference_encode(self, pr_mat, c):\n self.eval()\n with torch.no_grad():\n dist_chd = self.chd_encoder(c)\n dist_rhy = self.rhy_encoder(pr_mat)\n return dist_chd, dist_rhy\n\n def inference_decode(self, z_chd, z_rhy):\n self.eval()\n with torch.no_grad():\n dec_z = torch.cat([z_chd, z_rhy], dim=-1)\n pitch_outs, dur_outs = self.decoder(dec_z, True, None,\n None, 0., 0.)\n est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)\n return est_x\n\n def inference(self, pr_mat, c, sample):\n self.eval()\n with torch.no_grad():\n dist_chd = self.chd_encoder(c)\n dist_rhy = self.rhy_encoder(pr_mat)\n z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], sample)\n dec_z = torch.cat([z_chd, z_rhy], dim=-1)\n pitch_outs, dur_outs = self.decoder(dec_z, True, None,\n None, 0., 0.)\n est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)\n return est_x\n\n def swap(self, pr_mat1, pr_mat2, c1, c2, fix_rhy, fix_chd):\n pr_mat = pr_mat1 if fix_rhy else pr_mat2\n c = c1 if fix_chd else c2\n est_x = self.inference(pr_mat, c, sample=False)\n return est_x\n\n def posterior_sample(self, pr_mat, c, scale=None, sample_chd=True,\n sample_txt=True):\n if scale is None and sample_chd and sample_txt:\n est_x = self.inference(pr_mat, c, sample=True)\n else:\n dist_chd, dist_rhy = self.inference_encode(pr_mat, c)\n if scale is not None:\n mean_chd = dist_chd.mean\n mean_rhy = dist_rhy.mean\n # std_chd = torch.ones_like(dist_chd.mean) * scale\n # std_rhy = torch.ones_like(dist_rhy.mean) * scale\n std_chd = dist_chd.scale * scale\n std_rhy = dist_rhy.scale * scale\n dist_rhy = Normal(mean_rhy, std_rhy)\n dist_chd = Normal(mean_chd, std_chd)\n z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)\n if not sample_chd:\n z_chd = dist_chd.mean\n if not sample_txt:\n z_rhy = dist_rhy.mean\n est_x = self.inference_decode(z_chd, z_rhy)\n return est_x\n\n def prior_sample(self, x, c, sample_chd=False, sample_rhy=False,\n scale=1.):\n dist_chd, dist_rhy = self.inference_encode(x, c)\n mean = torch.zeros_like(dist_rhy.mean)\n loc = torch.ones_like(dist_rhy.mean) * scale\n if sample_chd:\n dist_chd = Normal(mean, loc)\n if sample_rhy:\n dist_rhy = Normal(mean, loc)\n z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)\n return self.inference_decode(z_chd, 
z_rhy)\n\n def gt_sample(self, x):\n out = x[:, :, 1:].numpy()\n return out\n\n def interp(self, pr_mat1, c1, pr_mat2, c2, interp_chd=False,\n interp_rhy=False, int_count=10):\n dist_chd1, dist_rhy1 = self.inference_encode(pr_mat1, c1)\n dist_chd2, dist_rhy2 = self.inference_encode(pr_mat2, c2)\n [z_chd1, z_rhy1, z_chd2, z_rhy2] = \\\n get_zs_from_dists([dist_chd1, dist_rhy1, dist_chd2, dist_rhy2],\n False)\n if interp_chd:\n z_chds = self.interp_z(z_chd1, z_chd2, int_count)\n else:\n z_chds = z_chd1.unsqueeze(1).repeat(1, int_count, 1)\n if interp_rhy:\n z_rhys = self.interp_z(z_rhy1, z_rhy2, int_count)\n else:\n z_rhys = z_rhy1.unsqueeze(1).repeat(1, int_count, 1)\n bs = z_chds.size(0)\n z_chds = z_chds.view(bs * int_count, -1).contiguous()\n z_rhys = z_rhys.view(bs * int_count, -1).contiguous()\n estxs = self.inference_decode(z_chds, z_rhys)\n return estxs.reshape((bs, int_count, 32, 15, -1))\n\n def interp_z(self, z1, z2, int_count=10):\n z1 = z1.numpy()\n z2 = z2.numpy()\n zs = torch.stack([self.interp_path(zz1, zz2, int_count)\n for zz1, zz2 in zip(z1, z2)], dim=0)\n return zs\n\n def interp_path(self, z1, z2, interpolation_count=10):\n result_shape = z1.shape\n z1 = z1.reshape(-1)\n z2 = z2.reshape(-1)\n\n def slerp2(p0, p1, t):\n omega = np.arccos(\n np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))\n so = np.sin(omega)\n return np.sin((1.0 - t) * omega)[:, None] / so * p0[\n None] + np.sin(\n t * omega)[:, None] / so * p1[None]\n\n percentages = np.linspace(0.0, 1.0, interpolation_count)\n\n normalized_z1 = z1 / np.linalg.norm(z1)\n normalized_z2 = z2 / np.linalg.norm(z2)\n dirs = slerp2(normalized_z1, normalized_z2, percentages)\n length = np.linspace(np.log(np.linalg.norm(z1)),\n np.log(np.linalg.norm(z2)),\n interpolation_count)\n out = (dirs * np.exp(length[:, None])).reshape(\n [interpolation_count] + list(result_shape))\n # out = np.array([(1 - t) * z1 + t * z2 for t in percentages])\n return torch.from_numpy(out).to(self.device).float()\n\n @staticmethod\n def init_model(device=None, chd_size=256, txt_size=256, num_channel=10):\n name = 'disvae'\n if device is None:\n device = torch.device('cuda' if torch.cuda.is_available()\n else 'cpu')\n # chd_encoder = RnnEncoder(36, 1024, 256)\n chd_encoder = RnnEncoder(36, 1024, chd_size)\n # rhy_encoder = TextureEncoder(256, 1024, 256)\n rhy_encoder = TextureEncoder(256, 1024, txt_size, num_channel)\n # pt_encoder = PtvaeEncoder(device=device, z_size=152)\n # chd_decoder = RnnDecoder(z_dim=256)\n chd_decoder = RnnDecoder(z_dim=chd_size)\n # pt_decoder = PtvaeDecoder(note_embedding=None,\n # dec_dur_hid_size=64, z_size=512)\n pt_decoder = PtvaeDecoder(note_embedding=None,\n dec_dur_hid_size=64,\n z_size=chd_size + txt_size)\n\n model = DisentangleVAE(name, device, chd_encoder,\n rhy_encoder, pt_decoder, chd_decoder)\n return model" }, { "identifier": "find_by_length", "path": "piano_arranger/AccoMontage.py", "snippet": "def find_by_length(melody_data, acc_data, chord_data, velocity_data, cc_data, length):\n \"\"\"Search from POP909 phrase data for a certain phrase length.\"\"\"\n melody_record = []\n acc_record = []\n chord_record = []\n velocity_record = []\n cc_record = []\n song_reference = []\n for song_idx in range(acc_data.shape[0]):\n for phrase_idx in range(len(acc_data[song_idx])):\n melody = melody_data[song_idx][phrase_idx]\n if not melody.shape[0] == length * 16:\n continue\n if np.sum(melody[:, :128]) <= 2:\n continue\n melody_record.append(melody)\n acc = acc_data[song_idx][phrase_idx]\n acc_record.append(acc)\n 
chord = chord_data[song_idx][phrase_idx]\n chord_record.append(chord)\n velocity = velocity_data[song_idx][phrase_idx]\n velocity_record.append(velocity)\n cc = cc_data[song_idx][phrase_idx]\n cc_record.append(cc)\n song_reference.append((song_idx, phrase_idx))\n return np.array(melody_record), np.array(acc_record), np.array(chord_record), np.array(velocity_record), np.array(cc_record), song_reference" }, { "identifier": "dp_search", "path": "piano_arranger/AccoMontage.py", "snippet": "def dp_search(query_phrases, seg_query, acc_pool, edge_weights, texture_filter=None, filter_id=None, spotlights=None, randomness=0):\n \"\"\"Search for texture donors based on dynamic programming.\n * query_phrases: lead sheet in segmented phrases. Shape of each phrase: (T, 142), quantized at 1/4-beat level. This format is defined in R. Yang et al., \"Deep music analogy via latent representation disentanglement,\" ISMIR 2019.\n * seg_query: phrase annotation for the lead sheet. Format of each phrase: (label, length, start). For example, seg_query=[('A', 8, 0), ('A', 8, 8), ('B', 4, 16)].\n * acc_pool: search space for piano texture donors.\n * edge_weights: pre-computed transition scores for texture donor i to i+1.\n * texture_filter: filter on voice number (VN) and rhythmic density (RD).\n * filter_id: specified VN abd RD to filter for the first phrase.\n * spotlights: specified a preference for certain songs and/or artists for the search process.\n * randomness: degree of randomness tobe introduced to the search process.\n \"\"\"\n seg_query = [item[0] + str(item[1]) for item in seg_query] #['A8', 'A8', 'B8', 'B8']\n #Searching for phrase 1\n query_length = [query_phrases[i].shape[0]//16 for i in range(len(query_phrases))]\n mel, acc, chord, _, _, song_ref = acc_pool[query_length[0]]\n mel_set = mel\n rhy_set = np.concatenate((np.sum(mel_set[:, :, :128], axis=-1, keepdims=True), mel_set[:, :, 128: 130]), axis=-1)\n query_rhy = np.concatenate((np.sum(query_phrases[0][:, : 128], axis=-1, keepdims=True), query_phrases[0][:, 128: 130]), axis=-1)[np.newaxis, :, :]\n rhythm_result = cosine_rhy(query_rhy+1e-5, rhy_set+1e-5)\n\n chord_set = chord\n chord_set, num_total, shift_const = chord_shift(chord_set)\n chord_set_TIV = computeTIV(chord_set)\n query_chord = query_phrases[0][:, 130:][::4]\n query_chord_TIV = computeTIV(query_chord)[np.newaxis, :, :]\n chord_score, arg_chord = cosine(query_chord_TIV, chord_set_TIV)\n\n score = .5*rhythm_result + .5*chord_score\n score += randomness * np.random.normal(0, 1, size=len(score)) #to introduce some randomness\n if spotlights is not None:\n for spot_idx in spotlights:\n for ref_idx, ref_item in enumerate(song_ref):\n if ref_item[0] == spot_idx: \n score[ref_idx] += 1\n if filter_id is not None:\n mask = texture_filter[query_length[0]][0][filter_id[0]] * texture_filter[query_length[0]][1][filter_id[1]] - 1\n score += mask\n\n path = [[(i, score[i])] for i in range(acc.shape[0])]\n shift = [[shift_const[i]] for i in arg_chord]\n melody_record = np.argmax(mel_set, axis=-1)\n record = []\n\n #Searching for phrase 2, 3, ...\n for i in tqdm(range(1, len(query_length))):\n mel, acc, chord, _, _, song_ref = acc_pool[query_length[i]]\n weight_key = f\"l_{str(query_length[i-1]).zfill(2)}_{str(query_length[i]).zfill(2)}\"\n contras_result = edge_weights[weight_key]\n if query_length[i-1] == query_length[i]:\n for j in range(contras_result.shape[0]):\n contras_result[j, j] = -1 #the ith phrase does not transition to itself at i+1\n for k in range(j-1, -1, -1):\n if song_ref[k][0] 
!= song_ref[j][0]:\n break\n contras_result[j, k] = -1 #ith phrase does not transition to its ancestors in the same song.\n if i > 1:\n contras_result = contras_result[[item[-1][1] for item in record]]\n if spotlights is not None:\n for spot_idx in spotlights:\n for ref_idx, ref_item in enumerate(song_ref):\n if ref_item[0] == spot_idx:\n contras_result[:, ref_idx] += 1\n mel_set = mel\n rhy_set = np.concatenate((np.sum(mel_set[:, :, :128], axis=-1, keepdims=True), mel_set[:, :, 128: 130]), axis=-1)\n query_rhy = np.concatenate((np.sum(query_phrases[i][:, : 128], axis=-1, keepdims=True), query_phrases[i][:, 128: 130]), axis=-1)[np.newaxis, :, :]\n rhythm_result = cosine_rhy(query_rhy, rhy_set)\n chord_set = chord\n chord_set, num_total, shift_const = chord_shift(chord_set)\n chord_set_TIV = computeTIV(chord_set)\n query_chord = query_phrases[i][:, 130:][::4]\n query_chord_TIV = computeTIV(query_chord)[np.newaxis, :, :]\n chord_score, arg_chord = cosine(query_chord_TIV, chord_set_TIV)\n sim_this_layer = .5*rhythm_result + .5*chord_score\n sim_this_layer += randomness * np.random.normal(0, 1, size=len(sim_this_layer))\n if spotlights is not None:\n for spot_idx in spotlights:\n for ref_idx, ref_item in enumerate(song_ref):\n if ref_item[0] == spot_idx: \n sim_this_layer[ref_idx] += 1\n score_this_layer = .7*contras_result + .3*np.tile(sim_this_layer[np.newaxis, :], (contras_result.shape[0], 1)) + np.tile(score[:, np.newaxis], (1, contras_result.shape[1]))\n melody_flat = np.argmax(mel_set, axis=-1)\n if seg_query[i] == seg_query[i-1]:\n melody_pre = melody_record\n matrix = np.matmul(melody_pre, np.transpose(melody_flat, (1, 0))) / (np.linalg.norm(melody_pre, axis=-1)[:, np.newaxis]*(np.linalg.norm(np.transpose(melody_flat, (1, 0)), axis=0))[np.newaxis, :])\n if i == 1:\n for k in range(matrix.shape[1]):\n matrix[k, :k] = -1\n else:\n for k in range(len(record)):\n matrix[k, :record[k][-1][1]] = -1\n matrix = (matrix > 0.99) * 1.\n score_this_layer += matrix\n topk = 1\n args = np.argsort(score_this_layer, axis=0)[::-1, :][:topk, :]\n record = []\n for j in range(args.shape[-1]):\n for k in range(args.shape[0]):\n record.append((score_this_layer[args[k, j], j], (args[k, j], j)))\n shift_this_layer = [[shift_const[k]] for k in arg_chord]\n new_path = [path[item[-1][0]] + [(item[-1][1], sim_this_layer[item[-1][1]])] for item in record]\n new_shift = [shift[item[-1][0]] + shift_this_layer[item[-1][1]] for item in record]\n melody_record = melody_flat[[item[-1][1] for item in record]]\n path = new_path\n shift = new_shift\n score = np.array([item[0] for item in record])\n\n arg = score.argsort()[::-1]\n return [path[arg[i]] for i in range(topk)], [shift[arg[i]] for i in range(topk)]" }, { "identifier": "re_harmonization", "path": "piano_arranger/AccoMontage.py", "snippet": "def re_harmonization(lead_sheet, chord_table, query_phrases, indices, shifts, acc_pool, model, get_est=True, tempo=120):\n \"\"\"Re-harmonize the accompaniment texture donors and save in MIDI.\n * lead_sheet: the conditional lead sheet. Its melody track will be taken. Shape: (T, 142), quantized at 1-beat level. This format is defined in R. Yang et al., \"Deep music analogy via latent representation disentanglement,\" ISMIR 2019.\n * chord_table: the conditional chord progression from the lead sheet. Shape: (T', 36), quantized at 1-beat level. This format is defined in Z. 
Wang et al., \"Learning interpretable representation for controllable polyphonic music generation,\" ISMIR 2020.\n * seg_query: phrase annotation for the lead sheet. Format of each phrase: (label, length, start). For example, seg_query=[('A', 8, 0), ('A', 8, 8), ('B', 4, 16)].\n * indices: the indices of selected texture donor phrases in the acc_pool.\n * shifts: pitch transposition of each selected phrase.\n * acc_pool: search space for piano texture donors.\n * tempo: the tempo to render the piece.\n \"\"\"\n acc_roll = np.empty((0, 128))\n vel_roll = []\n phrase_mean_vel = []\n cc_roll = np.empty((0, 128))\n #retrive texture donor data of the corrresponding indices from the acc_pool\n for i, idx in enumerate(indices):\n length = query_phrases[i][-2]\n shift = shifts[i]\n # notes\n acc_matrix = np.roll(acc_pool[length][1][idx[0]], shift, axis=-1)\n acc_roll = np.concatenate((acc_roll, acc_matrix), axis=0)\n #MIDI velocity\n vel_matrix = np.roll(acc_pool[length][3][idx[0]], shift, axis=-1)\n phrase_mean_vel.append(np.mean(np.ma.masked_equal(vel_matrix, value=0)))\n vel_roll.append(vel_matrix)\n #MIDI control messages (mainly for pedals)\n cc_matrix = acc_pool[length][4][idx[0]]\n cc_roll = np.concatenate((cc_roll, cc_matrix), axis=0)\n # normalize the scale of velocity across different retrieved phrases\n global_mean_vel = np.mean(np.ma.masked_equal(np.concatenate(vel_roll, axis=0), value=0))\n for i in range(len(vel_roll)):\n vel_roll[i][vel_roll[i] > 0] += (global_mean_vel - phrase_mean_vel[i])\n vel_roll = np.concatenate(vel_roll, axis=0)\n #re-harmonization\n if len(acc_roll) % 32 != 0:\n pad_len = (len(acc_roll)//32+1)*32 - len(acc_roll)\n acc_roll = np.pad(acc_roll, ((0, pad_len), (0, 0)))\n vel_roll = np.pad(vel_roll, ((0, pad_len), (0, 0)))\n cc_roll = np.pad(cc_roll, ((0, pad_len), (0, 0)), mode='constant', constant_values=-1)\n chord_table = np.pad(chord_table, ((0, pad_len//4), (0, 0)))\n chord_table[-pad_len:, 0] = -1\n chord_table[-pad_len:, -1] = -1\n acc_roll = acc_roll.reshape(-1, 32, 128)\n chord_table = chord_table.reshape(-1, 8, 36)\n acc_roll = torch.from_numpy(acc_roll).float().cuda()\n acc_roll = torch.clip(acc_roll, min=0, max=31)\n gt_chord = torch.from_numpy(chord_table).float().cuda()\n est_x = model.inference(acc_roll, gt_chord, sample=False)\n acc_roll = cvt.grid2pr(est_x.reshape(-1, 15, 6))\n #interpolate MIDI velocity\n adapt_vel_roll = np.zeros(vel_roll.shape)\n masked_dyn_matrix = np.ma.masked_equal(vel_roll, value=0)\n mean = np.mean(masked_dyn_matrix, axis=-1)\n onsets = np.nonzero(mean.data)\n dynamic = mean.data[onsets]\n onsets = onsets[0].tolist()\n dynamic = dynamic.tolist()\n if not 0 in onsets:\n onsets = [0] + onsets\n dynamic = [dynamic[0]] + dynamic\n if not len(vel_roll)-1 in onsets:\n onsets = onsets + [len(vel_roll)-1]\n dynamic = dynamic + [dynamic[-1]]\n dyn_curve = interp1d(onsets, dynamic)\n for t, p in zip(*np.nonzero(acc_roll)):\n adapt_vel_roll[t, p] = dyn_curve(t)\n adapt_vel_roll = np.clip(adapt_vel_roll, a_min=0, a_max=127)\n #reconstruct MIDI\n accompaniment = np.stack([acc_roll, adapt_vel_roll, cc_roll], axis=-1)[np.newaxis, :, :, :]\n midi_recon = cvt.matrix2midi_with_dynamics(accompaniment, programs=[0], init_tempo=tempo)\n melody_track = cvt.melody_matrix2data(melody_matrix=lead_sheet[:, :130], tempo=tempo)\n midi_recon.instruments = [melody_track] + midi_recon.instruments\n if get_est:\n return midi_recon, est_x\n else:\n return midi_recon" }, { "identifier": "get_texture_filter", "path": "piano_arranger/AccoMontage.py", 
"snippet": "def get_texture_filter(acc_pool):\n \"\"\"Divide accompaniment texture donors into fifths in terms of voice number (VN) and rhythmic density (RD).\"\"\"\n texture_filter = {}\n for key in acc_pool:\n acc_track = acc_pool[key][1]\n # CALCULATE HORIZONTAL DENSITY (rhythmic density)\n onset_positions = (np.sum(acc_track, axis=-1) > 0) * 1.\n HD = np.sum(onset_positions, axis=-1) / acc_track.shape[1] #(N)\n # CALCULATE VERTICAL DENSITY (voice number)\n beat_positions = acc_track[:, ::4, :]\n downbeat_positions = acc_track[:, ::16, :]\n upbeat_positions = acc_track[:, 2::4, :]\n\n simu_notes_on_beats = np.sum((beat_positions > 0) * 1., axis=-1) #N*T\n simu_notes_on_downbeats = np.sum((downbeat_positions > 0) * 1., axis=-1)\n simu_notes_on_upbeats = np.sum((upbeat_positions > 0) * 1., axis=-1)\n\n VD_beat = np.sum(simu_notes_on_beats, axis=-1) / (np.sum((simu_notes_on_beats > 0) * 1., axis=-1) + 1e-10)\n VD_upbeat = np.sum(simu_notes_on_upbeats, axis=-1) / (np.sum((simu_notes_on_upbeats > 0) * 1., axis=-1) + 1e-10)\n\n VD = np.max(np.stack((VD_beat, VD_upbeat), axis=-1), axis=-1)\n #get five-equal-divident-points of HD\n dst = np.sort(HD)\n HD_anchors = [dst[len(dst) // 5], dst[len(dst) // 5 * 2], dst[len(dst) // 5 * 3], dst[len(dst) // 5 * 4]]\n HD_Bins = [\n HD < HD_anchors[0],\n (HD >= HD_anchors[0]) * (HD < HD_anchors[1]),\n (HD >= HD_anchors[1]) * (HD < HD_anchors[2]),\n (HD >= HD_anchors[2]) * (HD < HD_anchors[3]),\n HD >= HD_anchors[3]\n ]\n #get five-equal-divident-points of VD\n dst = np.sort(VD)\n VD_anchors = [dst[len(dst) // 5], dst[len(dst) // 5 * 2], dst[len(dst) // 5 * 3], dst[len(dst) // 5 * 4]]\n VD_Bins = [\n VD < VD_anchors[0],\n (VD >= VD_anchors[0]) * (VD < VD_anchors[1]),\n (VD >= VD_anchors[1]) * (VD < VD_anchors[2]),\n (VD >= VD_anchors[2]) * (VD < VD_anchors[3]),\n VD >= VD_anchors[3]\n ]\n texture_filter[key] = (HD_Bins, VD_Bins) #((5, N), (5, N))\n return texture_filter" }, { "identifier": "ref_spotlight", "path": "piano_arranger/AccoMontage.py", "snippet": "def ref_spotlight(ref_name_list, reference_check):\n \"\"\"convert spotlight song/artist names into the indices of corresponding pieces in the dataset.\"\"\"\n if ref_name_list is None:\n return None\n check_idx = []\n #POP909 song_id\n for name in ref_name_list:\n line = reference_check[reference_check.song_id == name]\n if not line.empty:\n check_idx.append(line.index)#read by pd, neglect first row, index starts from 0.\n #song name\n for name in ref_name_list:\n line = reference_check[reference_check.name == name]\n if not line.empty:\n check_idx.append(line.index)#read by pd, neglect first row, index starts from 0.\n #artist name\n for name in ref_name_list:\n line = reference_check[reference_check.artist == name]\n if not line.empty:\n check_idx += list(line.index)#read by pd, neglect first row, index starts from 0\n return check_idx" }, { "identifier": "Slakh2100_Pop909_Dataset", "path": "orchestrator/QA_dataset.py", "snippet": "class Slakh2100_Pop909_Dataset(Dataset):\n def __init__(self, slakh_dir, pop909_dir, sample_len=SAMPLE_LEN, hop_len=BAR_HOP_LEN, debug_mode=False, split='train', mode='train', with_dynamics=False, merge_pop909=0):\n super(Slakh2100_Pop909_Dataset, self).__init__()\n self.split = split\n self.mode = mode\n self.debug_mode = debug_mode\n\n self.with_dynamics = with_dynamics\n self.merge_pop909 = merge_pop909\n\n self.memory = dict({'tracks': [],\n 'programs': [],\n 'dynamics': [],\n 'dir': []\n })\n self.anchor_list = []\n self.sample_len = sample_len\n \n if slakh_dir is 
not None:\n print('loading Slakh2100 Dataset ...')\n self.load_data(slakh_dir, sample_len, hop_len)\n if pop909_dir is not None:\n print('loading Pop909 Dataset ...')\n self.load_data(pop909_dir, sample_len, hop_len)\n\n def __len__(self):\n return len(self.anchor_list)\n \n def __getitem__(self, idx):\n song_id, start = self.anchor_list[idx]\n\n if self.mode == 'train': \n tracks_sample = self.memory['tracks'][song_id][:, start: start+self.sample_len]\n program_sample = self.memory['programs'][song_id]\n #delete empty tracks if any\n non_empty = np.nonzero(np.sum(tracks_sample, axis=(1, 2)))[0]\n tracks_sample = tracks_sample[non_empty]\n program_sample = program_sample[non_empty]\n\n elif (self.mode == 'test') or (self.mode == 'inference'): \n tracks_sample = self.memory['tracks'][song_id][:, start:]\n program_sample = self.memory['programs'][song_id]\n\n if ((len(program_sample) <= 3) and (program_sample == 0).all()):\n #merge pop909 into a single piano track at certain probability\n if np.random.rand() < self.merge_pop909: \n tracks_sample = np.max(tracks_sample, axis=0, keepdims=True)\n program_sample = np.array([0])\n\n if self.with_dynamics:\n dynamics = self.memory['dynamics'][song_id][:, start: start+self.sample_len]\n else: \n dynamics = None\n \n return tracks_sample, program_sample, dynamics, self.memory['dir'][song_id]\n\n\n def slakh_program_mapping(self, programs):\n return np.array([EMBED_PROGRAM_MAPPING[SLAKH_PROGRAM_MAPPING[program]] for program in programs])\n\n\n def load_data(self, data_dir, sample_len, hop_len):\n song_list = [os.path.join(data_dir, self.split, item) for item in os.listdir(os.path.join(data_dir, self.split))]\n if self.debug_mode:\n song_list = song_list[: 10]\n for song_dir in tqdm(song_list):\n song_data = np.load(song_dir)\n tracks = song_data['tracks'] #(n_track, time, 128)\n if 'programs' in song_data:\n programs = song_data['programs'] #(n_track, )\n else:\n programs = np.array([0]*len(tracks))\n\n center_pitch = compute_center_pitch(tracks)\n pitch_sort = np.argsort(center_pitch)[::-1]\n tracks = tracks[pitch_sort]\n programs = programs[pitch_sort]\n\n \"\"\"clipping\"\"\" \n if self.mode == 'train':\n if self.split =='validation':\n # during model training, no overlapping for validation set\n for i in range(0, tracks.shape[1], sample_len):\n if i + sample_len >= tracks.shape[1]:\n break\n self.anchor_list.append((len(self.memory['tracks']), i)) #(song_id, start, total_length)\n else:\n # otherwise, hop size is 1-bar\n downbeats = np.nonzero(song_data['db_indicator'])[0]\n for i in range(0, len(downbeats), hop_len):\n if downbeats[i] + sample_len >= tracks.shape[1]:\n break\n self.anchor_list.append((len(self.memory['tracks']), downbeats[i])) #(song_id, start)\n\n elif (self.mode == 'test') or (self.mode == 'inference'):\n start = np.nonzero(song_data['db_indicator'])[0][0]\n end = start + (tracks.shape[1] - start) // sample_len * sample_len\n if end < tracks.shape[1]:\n pad_len = end + sample_len - tracks.shape[1]\n end += sample_len\n tracks = np.pad(tracks, ((0, 0), (0, pad_len), (0, 0)), mode='constant', constant_values=(0,))\n tracks = tracks[:, start: end]\n self.anchor_list.append((len(self.memory['tracks']), start))\n\n self.memory['tracks'].append(tracks)\n self.memory['programs'].append(self.slakh_program_mapping(programs))\n self.memory['dir'].append(song_dir)\n\n if self.with_dynamics:\n self.memory['dynamics'].append(song_data['dynamics'])" }, { "identifier": "collate_fn", "path": "orchestrator/QA_dataset.py", "snippet": "def 
collate_fn(batch, device, pitch_shift=True):\n #print(batch)\n max_tracks = max([max(len(item[0]), 1) for item in batch])\n\n tracks = [] \n mixture = []\n instrument = []\n aux_feature = []\n mask = [] #track-wise pad mask\n function = []\n\n if pitch_shift:\n aug_p = AUG_P / AUG_P.sum()\n aug_shift = np.random.choice(np.arange(-6, 6), 1, p=aug_p)[0]\n else:\n aug_shift = 0\n\n for pr, programs, _, _ in batch:\n pr = pr_mat_pitch_shift(pr, aug_shift)\n aux, _, func = compute_pr_feat(pr)\n mask.append([0]*len(pr) + [1]*(max_tracks-len(pr)))\n\n pr = np.pad(pr, ((0, max_tracks-len(pr)), (0, 0), (0, 0)), mode='constant', constant_values=(0,))\n programs = np.pad(programs, (0, max_tracks-len(programs)), mode='constant', constant_values=(NUM_INSTR_CLASS,))\n aux = np.pad(aux, ((0, max_tracks-len(aux)), (0, 0), (0, 0)), mode='constant', constant_values=(0,))\n func = np.pad(func, ((0, max_tracks-len(func)), (0, 0)), mode='constant', constant_values=(0,))\n\n mix = pr2grid(np.max(pr, axis=0), max_note_count=32)\n grid = np.array([pr2grid(matrix) for matrix in pr])\n\n tracks.append(grid)\n mixture.append(mix)\n instrument.append(programs)\n aux_feature.append(aux)\n function.append(func)\n\n return torch.from_numpy(np.array(mixture)).long().to(device), \\\n torch.from_numpy(np.array(instrument)).to(device), \\\n torch.from_numpy(np.array(function)).float().to(device),\\\n torch.from_numpy(np.array(tracks)).long().to(device), \\\n torch.from_numpy(np.array(aux_feature)).float().to(device), \\\n torch.BoolTensor(mask).to(device)" }, { "identifier": "compute_pr_feat", "path": "orchestrator/QA_dataset.py", "snippet": "def compute_pr_feat(pr):\n #pr: (track, time, 128)\n onset = (np.sum(pr, axis=-1) > 0) * 1. #(track, time)\n rhy_intensity = np.clip(np.sum((pr > 0) * 1., axis=-1) / 14, a_min=None, a_max=1) #(track, time)\n\n weight = np.sum(pr, axis=-1)\n weight[weight==0] = 1\n pitch_center = np.sum(np.arange(0, 128)[np.newaxis, np.newaxis, :] * pr, axis=-1) / weight / 128\n\n feature = np.stack((onset, rhy_intensity, pitch_center), axis=-1)\n\n func_pitch = np.sum((pr > 0) * 1., axis=-2) / 32\n\n func_time = rhy_intensity.copy()\n \n return feature, func_pitch, func_time" }, { "identifier": "EMBED_PROGRAM_MAPPING", "path": "orchestrator/QA_dataset.py", "snippet": "EMBED_PROGRAM_MAPPING = dict({\n 0: 0, 4: 1, 8: 2, 16: 3, 24: 4, 26: 5, 29: 6, 32: 7,\\\n 33: 8, 40: 9, 41: 10, 42: 11, 43: 12, 46: 13, 47: 14, 48: 15,\\\n 50: 16, 52: 17, 55: 18, 56: 19, 57: 20, 58: 21, 60: 22, 61: 23, \n 64: 24, 66: 25, 67: 26, 68: 27, 69: 28, 70: 29, 71: 30, 72: 31,\\\n 80: 32, 88: 33})" }, { "identifier": "Prior", "path": "orchestrator/prior_model.py", "snippet": "class Prior(nn.Module):\n def __init__(self, mixture_encoder=None,\n function_encoder=None,\n context_enc_layer=12, \n function_dec_layer=12, \n d_model=256, \n nhead=8, \n dim_feedforward=1024, \n dropout=.1, \n function_resolution=8,\n inference=False,\n QA_model=None,\n DEVICE='cuda:0'):\n super(Prior, self).__init__()\n\n # embeddings\n self.func_embedding = nn.Embedding(num_embeddings=NUM_TIME_CODE+1, embedding_dim=d_model, padding_idx=NUM_TIME_CODE)\n self.prog_embedding = nn.Embedding(num_embeddings=NUM_INSTR_CLASS+1, embedding_dim=d_model, padding_idx=NUM_INSTR_CLASS)\n self.total_len_embedding = nn.Embedding(num_embeddings=len(TOTAL_LEN_BIN)+1, embedding_dim=d_model, padding_idx=len(TOTAL_LEN_BIN))\n self.abs_pos_embedding = nn.Embedding(num_embeddings=len(ABS_POS_BIN)+1, embedding_dim=d_model, padding_idx=len(ABS_POS_BIN))\n self.rel_pos_embedding 
= nn.Embedding(num_embeddings=len(REL_POS_BIN)+1, embedding_dim=d_model, padding_idx=len(REL_POS_BIN))\n\n self.start_embedding = nn.Parameter(torch.empty(NUM_INSTR_CLASS+1, d_model))\n nn.init.normal_(self.start_embedding)\n with torch.no_grad():\n self.start_embedding[NUM_INSTR_CLASS].fill_(0)\n\n #pre-trained encoders\n if not inference:\n self.mixture_encoder = mixture_encoder\n for param in self.mixture_encoder.parameters():\n param.requires_grad = False\n self.function_encoder = function_encoder\n for param in self.function_encoder.parameters():\n param.requires_grad = False\n else:\n self.QA_model = QA_model\n self.mixture_encoder = self.QA_model.mixture_enc\n self.function_encoder = self.QA_model.function_enc\n\n \n self.context_enc = nn.TransformerEncoder(\n nn.TransformerEncoderLayer(d_model=d_model, \n nhead=nhead, \n dim_feedforward=dim_feedforward, \n dropout=dropout, \n activation=F.gelu, \n batch_first=True, \n norm_first=True,\n device=DEVICE),\n num_layers=context_enc_layer)\n #multi-track Transformer\n self.mt_trf = nn.ModuleDict({})\n for layer in range(function_dec_layer):\n self.mt_trf[f'track_layer_{layer}'] = TransformerEncoderLayerRPE(d_model=d_model, \n nhead=nhead, \n dim_feedforward=dim_feedforward, \n dropout=dropout, \n norm_first=True,\n max_len=18).to(DEVICE)\n self.mt_trf[f'time_layer_{layer}'] = nn.TransformerDecoderLayer(d_model=d_model, \n nhead=nhead, \n dim_feedforward=dim_feedforward, \n dropout=dropout, \n activation=F.gelu, \n batch_first=True, \n norm_first=True,\n device=DEVICE)\n \n #positional encoding\n self.max_len = 1000\n position = torch.arange(self.max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))\n pe = torch.zeros(1, self.max_len, d_model)\n pe[0, :, 0::2] = torch.sin(position * div_term)\n pe[0, :, 1::2] = torch.cos(position * div_term)\n pe = pe.to(DEVICE)\n self.register_buffer('pe', pe)\n \n #decoder output module \n self.func_out_linear = nn.Linear(d_model, NUM_TIME_CODE)\n\n #constants\n self.d_model = d_model\n self.function_dec_layer = function_dec_layer\n self.func_res = function_resolution\n\n #loss function\n self.criterion = nn.CrossEntropyLoss(reduction='mean')\n\n\n def generate_square_subsequent_mask(self, sz=15):\n return torch.triu(torch.ones(sz, sz), diagonal=1).bool()\n\n\n def func_get_next_token(self, token, gt=None):\n #token: (batch, codebook_size)\n #gt: (bs,)\n if gt is None:\n idx = token.max(-1)[1]\n else:\n idx = gt\n token = torch.zeros_like(token, device=token.device)\n arange = torch.arange(token.shape[0], device=token.device).long()\n token[arange, idx] = 1\n return token.unsqueeze(1) #one-hot shaoe (batch, 1, ft_codebook_size)\n\n \n\n\n def run(self, mix, prog, function, tm_mask, tk_mask, total_len, abs_pos, rel_pos, inference=False):\n #mix: (batch, max_time, 256)\n #prog: (batch, max_track)\n #function: (batch, max_time, max_track, 8)\n #tm_mask: (batch, max_time)\n #tk_mask: (batch, max_track)\n #total_len: (batch, max_time)\n #abs_pos: (batch, max_time)\n #rel_pos: (batch, max_time)\n batch, max_time, _ = mix.shape\n _, max_track = prog.shape\n \n mix = mix + self.pe[:, :self.func_res*mix.shape[1], :][:, ::self.func_res]\n mix = mix + self.total_len_embedding(total_len)\n mix = mix + self.abs_pos_embedding(abs_pos)\n mix = mix + self.rel_pos_embedding(rel_pos)\n \n mix = self.context_enc(mix) #(batch, max_time, 256)\n mix = mix.unsqueeze(1) + self.prog_embedding(prog).unsqueeze(2) #(batch, max_track, max_time, 256)\n mix = mix.reshape(-1, 
max_time, self.d_model)\n\n function = function.permute(0, 1, 3, 2).reshape(batch, -1, max_track)\n func = self.func_embedding(function)#(batch, 8*max_time, max_track, d_model)\n \n func = torch.cat([\n self.start_embedding[prog].unsqueeze(1), #(batch, 1, max_track, d_model)\n func[:, :-1]], \n dim=1) #batch, 8*max_time, max_track, d_model\n\n func = func + self.prog_embedding(prog).unsqueeze(1) \n\n func = func + self.pe[:, :func.shape[1], :].unsqueeze(2)\n func = func + self.total_len_embedding(total_len).repeat_interleave(self.func_res, dim=1).unsqueeze(2)\n func = func + self.abs_pos_embedding(abs_pos).repeat_interleave(self.func_res, dim=1).unsqueeze(2)\n func = func + self.rel_pos_embedding(rel_pos).repeat_interleave(self.func_res, dim=1).unsqueeze(2)\n\n for layer in range(self.function_dec_layer):\n func = func.reshape(-1, max_track, self.d_model)\n func = self.mt_trf[f'track_layer_{layer}'](src=func, \n src_key_padding_mask=tk_mask.unsqueeze(1).repeat(1, self.func_res*max_time, 1).reshape(-1, max_track))\n func = func.reshape(batch, -1, max_track, self.d_model).permute(0, 2, 1, 3).reshape(-1, self.func_res*max_time, self.d_model)\n func = self.mt_trf[f'time_layer_{layer}'](tgt=func,\n tgt_mask=self.generate_square_subsequent_mask(self.func_res*max_time).to(func.device),\n tgt_key_padding_mask=tm_mask.unsqueeze(1).repeat(1, max_track, 1).reshape(-1, max_time).repeat_interleave(self.func_res, dim=-1),\n memory=mix) \n func = func.reshape(batch, max_track, -1, self.d_model).permute(0, 2, 1, 3) #(batch, 8*max_time, max_track, d_model)\n\n function_recon = self.func_out_linear(func)\n\n return function_recon, function\n\n \n\n def loss_function(self, function_recon, function_gt, tm_mask, tk_mask):\n\n mask = torch.logical_or(tm_mask.repeat_interleave(8, dim=-1).unsqueeze(-1), tk_mask.unsqueeze(1)) #(batch, 8*max_time, track) \n unmask = torch.logical_not(mask)\n\n function_loss = self.criterion(function_recon[unmask].reshape(-1, NUM_TIME_CODE), \n function_gt[unmask].reshape(-1))\n return function_loss\n \n\n def loss(self, mix, prog, function, tm_mask, tk_mask, total_len, abs_pos, rel_pos):\n output = self.run(mix, prog, function, tm_mask, tk_mask, total_len, abs_pos, rel_pos, inference=False)\n return self.loss_function(*output, tm_mask, tk_mask)\n \n\n def forward(self, mode, *input, **kwargs):\n if mode in [\"run\", 0]:\n return self.run(*input, **kwargs)\n elif mode in ['loss', 'train', 1]:\n return self.loss(*input, **kwargs)\n elif mode in ['inference', 'eval', 'val', 2]:\n return self.inference(*input, **kwargs)\n else:\n raise NotImplementedError\n\n\n def run_autoregressive_greedy(self, mix, prog, function, total_len, abs_pos, rel_pos, blur=.5):\n #mix: (batch, num2bar, bar_resolution, max_simu_note, 6)\n #prog: (batch, max_track)\n #function: (batch, 1, max_track, 32)\n #total_len: (batch, num2bar)\n #abs_pos: (batch, num2bar)\n #rel_pos: (batch, num2bar)\n batch, num_2bar, time, max_simu_note, _ = mix.shape\n _, max_track = prog.shape\n\n mix = mix.reshape(-1, time, max_simu_note, 6)\n mix = self.mixture_encoder(mix)[0].mean.reshape(batch, num_2bar, -1) #(batch, num_2bar, 256)\n mix_ = (1-blur)*mix.clone() + blur*torch.empty(mix.shape, device=mix.device).normal_(mean=0, std=1) \n \n mix_ = mix_ + self.pe[:, :self.func_res*mix.shape[1], :][:, ::self.func_res]\n mix_ = mix_ + self.total_len_embedding(total_len)\n mix_ = mix_ + self.abs_pos_embedding(abs_pos)\n mix_ = mix_ + self.rel_pos_embedding(rel_pos)\n\n mix_ = self.context_enc(mix_) #(batch, num_bar, 256)\n mix_ = 
mix_.unsqueeze(1) + self.prog_embedding(prog).unsqueeze(2) #(batch, max_track, num_bar, 256)\n mix_ = mix_.reshape(-1, num_2bar, self.d_model)\n \n function = function.reshape(-1, 32)\n function = self.function_encoder.get_code_indices(function).reshape(batch, max_track, self.func_res)\n\n\n for idx in range(self.func_res, self.func_res*num_2bar):\n func = self.func_embedding(function) #*batch, max_track, 8, d_model\n func = func.permute(0, 2, 1, 3).reshape(batch, -1, max_track, self.d_model)\n\n func = func + self.prog_embedding(prog).unsqueeze(1)\n func = func + self.pe[:, :func.shape[1], :].unsqueeze(2)\n\n func = func + self.total_len_embedding(total_len).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.abs_pos_embedding(abs_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.rel_pos_embedding(rel_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n\n for layer in range(self.function_dec_layer):\n \n func = func.reshape(-1, max_track, self.d_model)\n func = self.mt_trf[f'track_layer_{layer}'](src=func)\n func = func.reshape(batch, -1, max_track, self.d_model).permute(0, 2, 1, 3).reshape(-1, idx, self.d_model)\n func = self.mt_trf[f'time_layer_{layer}'](tgt=func,\n tgt_mask=self.generate_square_subsequent_mask(sz=idx).to(func.device),\n memory=mix_) \n func = func.reshape(batch, max_track, -1, self.d_model).permute(0, 2, 1, 3) #(batch, num2bar-1, max_track, d_model)\n\n \n func_pred = self.func_out_linear(func[:, -1,]).max(-1)[1].unsqueeze(-1)\n\n function = torch.cat([function, func_pred], dim=-1)\n if function.shape[1] == self.func_res*num_2bar:\n break\n \n function = function.reshape(batch, max_track, num_2bar, self.func_res).permute(0, 2, 1, 3)\n z_func = self.function_encoder.infer_by_codes(function)\n return self.QA_model.infer_with_function_codes(mix[0], prog[0].repeat(num_2bar, 1), z_func[0])\n \n\n def run_autoregressive_nucleus(self, mix, prog, func_prompt, total_len, abs_pos, rel_pos, blur=.5, p=.1, t=1):\n #mix: (batch, num2bar, bar_resolution, max_simu_note, 6)\n #prog: (batch, max_track)\n #func_prompt: (batch, 1, max_track, 32)\n #total_len: (batch, num2bar)\n #abs_pos: (batch, num2bar)\n #rel_pos: (batch, num2bar)\n\n batch, num_2bar, time, max_simu_note, _ = mix.shape\n _, max_track = prog.shape\n\n mix = mix.reshape(-1, time, max_simu_note, 6)\n mix = self.mixture_encoder(mix)[0].mean.reshape(batch, num_2bar, -1) #(batch, num_2bar, 256)\n mix_ = (1-blur)*mix.clone() + blur*torch.empty(mix.shape, device=mix.device).normal_(mean=0, std=1) \n \n mix_ = mix_ + self.pe[:, :self.func_res*mix.shape[1], :][:, ::self.func_res]\n mix_ = mix_ + self.total_len_embedding(total_len)\n mix_ = mix_ + self.abs_pos_embedding(abs_pos)\n mix_ = mix_ + self.rel_pos_embedding(rel_pos)\n\n mix_ = self.context_enc(mix_) #(batch, num_bar, 256)\n mix_ = mix_.unsqueeze(1) + self.prog_embedding(prog).unsqueeze(2) #(batch, max_track, num_bar, 256)\n mix_ = mix_.reshape(-1, num_2bar, self.d_model)\n \n start = self.start_embedding[prog].unsqueeze(1) #(batch, 1, max_track, dmodel)\n\n if func_prompt is not None:\n func_prompt = func_prompt.reshape(-1, 32)\n func_prompt = self.function_encoder.get_code_indices(func_prompt).reshape(batch, max_track, self.func_res).permute(0, 2, 1) #(batch, 8, max_track)\n #else:\n function = torch.empty((batch, 0, max_track)).long().to(mix.device)\n\n for idx in range(self.func_res*num_2bar):\n if (idx < self.func_res) and (func_prompt is not None):\n 
start = torch.cat([start, self.func_embedding(function[:, idx-1: idx, :])], dim=1)\n function = torch.cat([function, func_prompt[:, idx: idx+1, :]], dim=1) \n continue\n else:\n func = torch.cat([start, self.func_embedding(function[:, idx-1: idx, :])], dim=1)\n\n func = func + self.prog_embedding(prog).unsqueeze(1)\n func = func + self.pe[:, :func.shape[1], :].unsqueeze(2)\n\n func = func + self.total_len_embedding(total_len).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.abs_pos_embedding(abs_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.rel_pos_embedding(rel_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n\n for layer in range(self.function_dec_layer):\n \n func = func.reshape(-1, max_track, self.d_model)\n func = self.mt_trf[f'track_layer_{layer}'](src=func)\n func = func.reshape(batch, -1, max_track, self.d_model).permute(0, 2, 1, 3).reshape(-1, idx+1, self.d_model)\n func = self.mt_trf[f'time_layer_{layer}'](tgt=func,\n tgt_mask=self.generate_square_subsequent_mask(sz=idx+1).to(func.device),\n memory=mix_) \n func = func.reshape(batch, max_track, -1, self.d_model).permute(0, 2, 1, 3)#(batch, num2bar-1, max_track, d_model)\n \n start = torch.cat([start, self.func_embedding(function[:, idx-1: idx, :])], dim=1)\n\n func_logits = self.func_out_linear(func[:, -1,]) / t\n filtered_func_logits = self.nucleus_filter(func_logits, p)\n func_probability = F.softmax(filtered_func_logits, dim=-1)\n func_pred = torch.multinomial(func_probability.reshape(-1, NUM_TIME_CODE), 1).reshape(func_probability.shape[:-1]).unsqueeze(1)\n\n function = torch.cat([function, func_pred], dim=1)\n if function.shape[1] == self.func_res*num_2bar:\n break\n \n\n \n function = function.reshape(batch, num_2bar, self.func_res, max_track).permute(0, 1, 3, 2)\n z_func = self.function_encoder.infer_by_codes(function)\n return self.QA_model.infer_with_function_codes(mix[0], prog[0].repeat(num_2bar, 1), z_func[0])\n \n def nucleus_filter(self, logits, p):\n #sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n sorted_logits, sorted_indices = torch.sort(logits, dim=-1, descending=True)\n #cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n cum_sum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold\n #sorted_indices_to_remove = cumulative_probs > p\n nucleus = cum_sum_probs < p\n # Shift the indices to the right to keep also the first token above the threshold\n #sorted_indices_to_remove = torch.cat([sorted_indices_to_remove.new_zeros(sorted_indices_to_remove.shape[:-1] + (1,)), sorted_indices_to_remove[..., :-1]], dim=-1)\n nucleus = torch.cat([nucleus.new_ones(nucleus.shape[:-1] + (1,)), nucleus[..., :-1]], dim=-1)\n nucleus = nucleus.gather(-1, sorted_indices.argsort(-1))\n\n logits[~nucleus] = float('-inf')\n return logits\n \n\n\n @classmethod\n def init_model(cls, pretrain_model_path=None, DEVICE='cuda:0'):\n \"\"\"Fast model initialization.\"\"\"\n vqQaA = Query_and_reArrange(name='pretrain', trf_layers=2, device=DEVICE)\n if pretrain_model_path is not None:\n vqQaA.load_state_dict(torch.load(pretrain_model_path, map_location=torch.device('cpu')))\n vqQaA.eval()\n model = cls(vqQaA.mixture_enc, vqQaA.function_enc, DEVICE=DEVICE).to(DEVICE)\n return model\n \n @classmethod\n def init_inference_model(cls, prior_model_path, QA_model_path, DEVICE='cuda:0'):\n \"\"\"Fast model 
initialization.\"\"\"\n vqQaA = Query_and_reArrange(name='pretrain', trf_layers=2, device=DEVICE)\n vqQaA.load_state_dict(torch.load(QA_model_path, map_location=torch.device('cpu')))\n vqQaA.eval()\n model = cls(inference=True, QA_model=vqQaA, DEVICE=DEVICE).to(DEVICE)\n model.load_state_dict(torch.load(prior_model_path), strict=False)\n return model" }, { "identifier": "SLAKH_CLASS_PROGRAMS", "path": "orchestrator/QA_dataset.py", "snippet": "SLAKH_CLASS_PROGRAMS = dict({\n 0: 'Acoustic Piano', #0\n 4: 'Electric Piano', #1\n 8: 'Chromatic Percussion',#2\n 16: 'Organ', #3\n 24: 'Acoustic Guitar', #4\n 26: 'Clean Electric Guitar', #5\n 29: 'Distorted Electric Guitar', #6\n 32: 'Acoustic Bass', #7\n 33: 'Electric Bass', #8\n 40: 'Violin', #9\n 41: 'Viola', #10\n 42: 'Cello', #11\n 43: 'Contrabass', #12\n 46: 'Orchestral Harp', #13\n 47: 'Timpani', #14\n 48: 'String Ensemble', #15\n 50: 'Synth Strings', #16\n 52: 'Choir and Voice', #17\n 55: 'Orchestral Hit', #18\n 56: 'Trumpet', #19\n 57: 'Trombone', #20\n 58: 'Tuba', #21\n 60: 'French Horn', #22\n 61: 'Brass Section', #23\n 64: 'Soprano/Alto Sax', #24\n 66: 'Tenor Sax', #25\n 67: 'Baritone Sax', #26\n 68: 'Oboe', #27\n 69: 'English Horn', #28\n 70: 'Bassoon', #29\n 71: 'Clarinet', #30\n 72: 'Pipe', #31\n 80: 'Synth Lead', #32\n 88: 'Synth Pad' #33\n})" }, { "identifier": "grid2pr", "path": "orchestrator/utils/format_convert.py", "snippet": "def grid2pr(grid, max_note_count=16, min_pitch=0, pitch_eos_ind=129):\n #grid: (time, max_simu_note, 6)\n if grid.shape[1] == max_note_count:\n grid = grid[:, 1:]\n pr = np.zeros((grid.shape[0], 128), dtype=int)\n for t in range(grid.shape[0]):\n for n in range(grid.shape[1]):\n note = grid[t, n]\n if note[0] == pitch_eos_ind:\n break\n pitch = note[0] + min_pitch\n dur = int(''.join([str(_) for _ in note[1:]]), 2) + 1\n pr[t, pitch] = dur\n return pr" }, { "identifier": "pr2grid", "path": "orchestrator/utils/format_convert.py", "snippet": "def pr2grid(pr_mat, max_note_count=16, max_pitch=127, min_pitch=0,\n pitch_pad_ind=130, dur_pad_ind=2,\n pitch_sos_ind=128, pitch_eos_ind=129):\n pr_mat3d = np.ones((len(pr_mat), max_note_count, 6), dtype=int) * dur_pad_ind\n pr_mat3d[:, :, 0] = pitch_pad_ind\n pr_mat3d[:, 0, 0] = pitch_sos_ind\n cur_idx = np.ones(len(pr_mat), dtype=int)\n for t, p in zip(*np.where(pr_mat != 0)):\n pr_mat3d[t, cur_idx[t], 0] = p - min_pitch\n binary = np.binary_repr(min(int(pr_mat[t, p]), 32) - 1, width=5)\n pr_mat3d[t, cur_idx[t], 1: 6] = \\\n np.fromstring(' '.join(list(binary)), dtype=int, sep=' ')\n if cur_idx[t] == max_note_count-1:\n continue\n cur_idx[t] += 1\n #print(cur_idx)\n pr_mat3d[np.arange(0, len(pr_mat)), cur_idx, 0] = pitch_eos_ind\n return pr_mat3d" }, { "identifier": "matrix2midi", "path": "orchestrator/utils/format_convert.py", "snippet": "def matrix2midi(matrices, programs, init_tempo=120, time_start=0):\n \"\"\"\n Reconstruct a multi-track midi from a 3D matrix of shape (Track. 
Time, 128).\n \"\"\"\n ACC = 16\n tracks = []\n for program in programs:\n track_recon = pyd.Instrument(program=int(program), is_drum=False, name=pyd.program_to_instrument_name(int(program)))\n tracks.append(track_recon)\n\n indices_track, indices_onset, indices_pitch = np.nonzero(matrices)\n alpha = 1 / (ACC // 4) * 60 / init_tempo #timetep between each quntization bin\n for idx in range(len(indices_track)):\n track_id = indices_track[idx]\n onset = indices_onset[idx]\n pitch = indices_pitch[idx]\n\n start = onset * alpha\n duration = matrices[track_id, onset, pitch] * alpha\n velocity = 100\n\n note_recon = pyd.Note(velocity=int(velocity), pitch=int(pitch), start=time_start + start, end=time_start + start + duration)\n tracks[track_id].notes.append(note_recon)\n \n midi_recon = pyd.PrettyMIDI(initial_tempo=init_tempo)\n midi_recon.instruments = tracks\n return midi_recon" }, { "identifier": "midi2matrix", "path": "orchestrator/utils/format_convert.py", "snippet": "def midi2matrix(midi, quaver):\n pr_matrices = []\n programs = []\n for track in midi.instruments:\n programs.append(track.program)\n pr_matrix = np.zeros((len(quaver), 128))\n for note in track.notes:\n note_start = np.argmin(np.abs(quaver - note.start))\n note_end = np.argmin(np.abs(quaver - note.end))\n if note_end == note_start:\n note_end = min(note_start + 1, len(quaver) - 1)\n pr_matrix[note_start, note.pitch] = note_end - note_start\n pr_matrices.append(pr_matrix)\n return np.array(pr_matrices), np.array(programs)" }, { "identifier": "TOTAL_LEN_BIN", "path": "orchestrator/prior_dataset.py", "snippet": "TOTAL_LEN_BIN = np.array([4, 7, 12, 15, 20, 23, 28, 31, 36, 39, 44, 47, 52, 55, 60, 63, 68, 71, 76, 79, 84, 87, 92, 95, 100, 103, 108, 111, 116, 119, 124, 127, 132])" }, { "identifier": "ABS_POS_BIN", "path": "orchestrator/prior_dataset.py", "snippet": "ABS_POS_BIN = np.arange(129)" }, { "identifier": "REL_POS_BIN", "path": "orchestrator/prior_dataset.py", "snippet": "REL_POS_BIN = np.arange(128)" } ]
import os import pretty_midi as pyd import numpy as np import torch import piano_arranger.format_converter as cvt from torch.utils.data import DataLoader from scipy.interpolate import interp1d from tqdm import tqdm from piano_arranger.acc_utils import split_phrases from piano_arranger.models import DisentangleVAE from piano_arranger.AccoMontage import find_by_length, dp_search, re_harmonization, get_texture_filter, ref_spotlight from orchestrator import Slakh2100_Pop909_Dataset, collate_fn, compute_pr_feat, EMBED_PROGRAM_MAPPING, Prior from orchestrator.QA_dataset import SLAKH_CLASS_PROGRAMS from orchestrator.utils import grid2pr, pr2grid, matrix2midi, midi2matrix from orchestrator.prior_dataset import TOTAL_LEN_BIN, ABS_POS_BIN, REL_POS_BIN
19,318
texture_filter = get_texture_filter(acc_pool) edge_weights=np.load(os.path.join(DATA_FILE_ROOT, 'edge_weights.npz'), allow_pickle=True) """Load Q&A Prompt Search Space""" print('loading orchestration prompt search space ...') slakh_dir = os.path.join(DATA_FILE_ROOT, 'Slakh2100_inference_set') dataset = Slakh2100_Pop909_Dataset(slakh_dir=slakh_dir, pop909_dir=None, debug_mode=False, split='validation', mode='train') loader = DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=lambda b:collate_fn(b, DEVICE)) REF = [] REF_PROG = [] REF_MIX = [] for (_, prog, function, _, _, _) in loader: prog = prog[0, :] REF.extend([batch for batch in function]) REF_PROG.extend([prog for _ in range(len(function))]) REF_MIX.append(torch.sum(function, dim=1)) REF_MIX = torch.cat(REF_MIX, dim=0) """Initialize orchestration model (Prior + Q&A)""" print('Initialize model ...') prior_model_path = os.path.join(DATA_FILE_ROOT, 'params_prior.pt') QaA_model_path = os.path.join(DATA_FILE_ROOT, 'params_qa.pt') orchestrator = Prior.init_inference_model(prior_model_path, QaA_model_path, DEVICE=DEVICE) orchestrator.to(DEVICE) orchestrator.eval() piano_arranger = DisentangleVAE.init_model(torch.device('cuda')).cuda() piano_arranger.load_state_dict(torch.load(os.path.join(DATA_FILE_ROOT, 'params_reharmonizer.pt'))) print('Finished.') return piano_arranger, orchestrator, (acc_pool, edge_weights, texture_filter), (REF, REF_PROG, REF_MIX) def read_lead_sheet(DEMO_ROOT, SONG_NAME, SEGMENTATION, NOTE_SHIFT, melody_track_ID=0): melody_roll, chord_roll = cvt.leadsheet2matrix(os.path.join(DEMO_ROOT, SONG_NAME, 'lead sheet.mid'), melody_track_ID) assert(len(melody_roll == len(chord_roll))) if NOTE_SHIFT != 0: melody_roll = melody_roll[int(NOTE_SHIFT*4):, :] chord_roll = chord_roll[int(NOTE_SHIFT*4):, :] if len(melody_roll) % 16 != 0: pad_len = (len(melody_roll)//16+1)*16-len(melody_roll) melody_roll = np.pad(melody_roll, ((0, pad_len), (0, 0))) melody_roll[-pad_len:, -1] = 1 chord_roll = np.pad(chord_roll, ((0, pad_len), (0, 0))) chord_roll[-pad_len:, 0] = -1 chord_roll[-pad_len:, -1] = -1 CHORD_TABLE = np.stack([cvt.expand_chord(chord) for chord in chord_roll[::4]], axis=0) LEADSHEET = np.concatenate((melody_roll, chord_roll[:, 1: -1]), axis=-1) #T*142, quantized at 16th query_phrases = split_phrases(SEGMENTATION) #[('A', 8, 0), ('A', 8, 8), ('B', 8, 16), ('B', 8, 24)] midi_len = len(LEADSHEET)//16 anno_len = sum([item[1] for item in query_phrases]) if midi_len > anno_len: LEADSHEET = LEADSHEET[: anno_len*16] CHORD_TABLE = CHORD_TABLE[: anno_len*4] print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. The lead sheet is truncated to {anno_len} bars.') elif midi_len < anno_len: pad_len = (anno_len - midi_len)*16 LEADSHEET = np.pad(LEADSHEET, ((0, pad_len), (0, 0))) LEADSHEET[-pad_len:, 129] = 1 CHORD_TABLE = np.pad(CHORD_TABLE, ((0, pad_len//4), (0, 0))) CHORD_TABLE[-pad_len//4:, 11] = -1 CHORD_TABLE[-pad_len//4:, -1] = -1 print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. 
The lead sheet is padded to {anno_len} bars.') melody_queries = [] for item in query_phrases: start_bar = item[-1] length = item[-2] segment = LEADSHEET[start_bar*16: (start_bar+length)*16] melody_queries.append(segment) #melody queries: list of T16*142, segmented by phrases return (LEADSHEET, CHORD_TABLE, melody_queries, query_phrases) def piano_arrangement(pianoRoll, chord_table, melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, piano_arranger, PREFILTER, tempo=100): print('Phrasal Unit selection begins:\n\t', f'{len(query_phrases)} phrases in the lead sheet;\n\t', f'set note density filter: {PREFILTER}.') phrase_indice, chord_shift = dp_search( melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, filter_id=PREFILTER) path = phrase_indice[0] shift = chord_shift[0] print('Re-harmonization begins ...') midi_recon, acc = re_harmonization(pianoRoll, chord_table, query_phrases, path, shift, acc_pool, model=piano_arranger, get_est=True, tempo=tempo) acc = np.array([grid2pr(matrix) for matrix in acc]) print('Piano accompaiment generated!') return midi_recon, acc def prompt_sampling(acc_piano, REF, REF_PROG, REF_MIX, DEVICE='cuda:0'): ref_mix = torch.from_numpy(compute_pr_feat(acc_piano[0:1])[-1]).to(DEVICE) sim_func = torch.nn.CosineSimilarity(dim=-1) distance = sim_func(ref_mix, REF_MIX) distance = distance + torch.normal(mean=torch.zeros(distance.shape), std=0.2*torch.ones(distance.shape)).to(distance.device) sim_values, anchor_points = torch.sort(distance, descending=True) IDX = 0 sim_value = sim_values[IDX] anchor_point = anchor_points[IDX] function = REF[anchor_point] prog = REF_PROG[anchor_point] prog_class = [SLAKH_CLASS_MAPPING[item.item()] for item in prog.cpu().detach().numpy()] program_name = [SLAKH_CLASS_PROGRAMS[item] for item in prog_class] print(f'Prior model initialized with {len(program_name)} tracks:\n\t{program_name}') return prog, function def orchestration(acc_piano, chord_track, prog, function, orchestrator, DEVICE='cuda:0', blur=.5, p=.1, t=4, tempo=100): print('Orchestration begins ...') if chord_track is not None: if len(acc_piano) > len(chord_track): chord_track = np.pad(chord_track, ((0, 0), (len(acc_piano)-len(chord_track)))) else: chord_track = chord_track[:len(acc_piano)] acc_piano = np.max(np.stack([acc_piano, chord_track], axis=0), axis=0) mix = torch.from_numpy(np.array([pr2grid(matrix, max_note_count=32) for matrix in acc_piano])).to(DEVICE) r_pos = np.round(np.arange(0, len(mix), 1) / (len(mix)-1) * len(REL_POS_BIN))
SLAKH_CLASS_MAPPING = {v: k for k, v in EMBED_PROGRAM_MAPPING.items()} def load_premise(DATA_FILE_ROOT, DEVICE): """Load AccoMontage Search Space""" print('Loading AccoMontage piano texture search space. This may take 1 or 2 minutes ...') data = np.load(os.path.join(DATA_FILE_ROOT, 'phrase_data.npz'), allow_pickle=True) melody = data['melody'] acc = data['acc'] chord = data['chord'] vel = data['velocity'] cc = data['cc'] acc_pool = {} for LEN in tqdm(range(2, 13)): (mel, acc_, chord_, vel_, cc_, song_reference) = find_by_length(melody, acc, chord, vel, cc, LEN) acc_pool[LEN] = (mel, acc_, chord_, vel_, cc_, song_reference) texture_filter = get_texture_filter(acc_pool) edge_weights=np.load(os.path.join(DATA_FILE_ROOT, 'edge_weights.npz'), allow_pickle=True) """Load Q&A Prompt Search Space""" print('loading orchestration prompt search space ...') slakh_dir = os.path.join(DATA_FILE_ROOT, 'Slakh2100_inference_set') dataset = Slakh2100_Pop909_Dataset(slakh_dir=slakh_dir, pop909_dir=None, debug_mode=False, split='validation', mode='train') loader = DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=lambda b:collate_fn(b, DEVICE)) REF = [] REF_PROG = [] REF_MIX = [] for (_, prog, function, _, _, _) in loader: prog = prog[0, :] REF.extend([batch for batch in function]) REF_PROG.extend([prog for _ in range(len(function))]) REF_MIX.append(torch.sum(function, dim=1)) REF_MIX = torch.cat(REF_MIX, dim=0) """Initialize orchestration model (Prior + Q&A)""" print('Initialize model ...') prior_model_path = os.path.join(DATA_FILE_ROOT, 'params_prior.pt') QaA_model_path = os.path.join(DATA_FILE_ROOT, 'params_qa.pt') orchestrator = Prior.init_inference_model(prior_model_path, QaA_model_path, DEVICE=DEVICE) orchestrator.to(DEVICE) orchestrator.eval() piano_arranger = DisentangleVAE.init_model(torch.device('cuda')).cuda() piano_arranger.load_state_dict(torch.load(os.path.join(DATA_FILE_ROOT, 'params_reharmonizer.pt'))) print('Finished.') return piano_arranger, orchestrator, (acc_pool, edge_weights, texture_filter), (REF, REF_PROG, REF_MIX) def read_lead_sheet(DEMO_ROOT, SONG_NAME, SEGMENTATION, NOTE_SHIFT, melody_track_ID=0): melody_roll, chord_roll = cvt.leadsheet2matrix(os.path.join(DEMO_ROOT, SONG_NAME, 'lead sheet.mid'), melody_track_ID) assert(len(melody_roll == len(chord_roll))) if NOTE_SHIFT != 0: melody_roll = melody_roll[int(NOTE_SHIFT*4):, :] chord_roll = chord_roll[int(NOTE_SHIFT*4):, :] if len(melody_roll) % 16 != 0: pad_len = (len(melody_roll)//16+1)*16-len(melody_roll) melody_roll = np.pad(melody_roll, ((0, pad_len), (0, 0))) melody_roll[-pad_len:, -1] = 1 chord_roll = np.pad(chord_roll, ((0, pad_len), (0, 0))) chord_roll[-pad_len:, 0] = -1 chord_roll[-pad_len:, -1] = -1 CHORD_TABLE = np.stack([cvt.expand_chord(chord) for chord in chord_roll[::4]], axis=0) LEADSHEET = np.concatenate((melody_roll, chord_roll[:, 1: -1]), axis=-1) #T*142, quantized at 16th query_phrases = split_phrases(SEGMENTATION) #[('A', 8, 0), ('A', 8, 8), ('B', 8, 16), ('B', 8, 24)] midi_len = len(LEADSHEET)//16 anno_len = sum([item[1] for item in query_phrases]) if midi_len > anno_len: LEADSHEET = LEADSHEET[: anno_len*16] CHORD_TABLE = CHORD_TABLE[: anno_len*4] print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. 
The lead sheet is truncated to {anno_len} bars.') elif midi_len < anno_len: pad_len = (anno_len - midi_len)*16 LEADSHEET = np.pad(LEADSHEET, ((0, pad_len), (0, 0))) LEADSHEET[-pad_len:, 129] = 1 CHORD_TABLE = np.pad(CHORD_TABLE, ((0, pad_len//4), (0, 0))) CHORD_TABLE[-pad_len//4:, 11] = -1 CHORD_TABLE[-pad_len//4:, -1] = -1 print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. The lead sheet is padded to {anno_len} bars.') melody_queries = [] for item in query_phrases: start_bar = item[-1] length = item[-2] segment = LEADSHEET[start_bar*16: (start_bar+length)*16] melody_queries.append(segment) #melody queries: list of T16*142, segmented by phrases return (LEADSHEET, CHORD_TABLE, melody_queries, query_phrases) def piano_arrangement(pianoRoll, chord_table, melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, piano_arranger, PREFILTER, tempo=100): print('Phrasal Unit selection begins:\n\t', f'{len(query_phrases)} phrases in the lead sheet;\n\t', f'set note density filter: {PREFILTER}.') phrase_indice, chord_shift = dp_search( melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, filter_id=PREFILTER) path = phrase_indice[0] shift = chord_shift[0] print('Re-harmonization begins ...') midi_recon, acc = re_harmonization(pianoRoll, chord_table, query_phrases, path, shift, acc_pool, model=piano_arranger, get_est=True, tempo=tempo) acc = np.array([grid2pr(matrix) for matrix in acc]) print('Piano accompaiment generated!') return midi_recon, acc def prompt_sampling(acc_piano, REF, REF_PROG, REF_MIX, DEVICE='cuda:0'): ref_mix = torch.from_numpy(compute_pr_feat(acc_piano[0:1])[-1]).to(DEVICE) sim_func = torch.nn.CosineSimilarity(dim=-1) distance = sim_func(ref_mix, REF_MIX) distance = distance + torch.normal(mean=torch.zeros(distance.shape), std=0.2*torch.ones(distance.shape)).to(distance.device) sim_values, anchor_points = torch.sort(distance, descending=True) IDX = 0 sim_value = sim_values[IDX] anchor_point = anchor_points[IDX] function = REF[anchor_point] prog = REF_PROG[anchor_point] prog_class = [SLAKH_CLASS_MAPPING[item.item()] for item in prog.cpu().detach().numpy()] program_name = [SLAKH_CLASS_PROGRAMS[item] for item in prog_class] print(f'Prior model initialized with {len(program_name)} tracks:\n\t{program_name}') return prog, function def orchestration(acc_piano, chord_track, prog, function, orchestrator, DEVICE='cuda:0', blur=.5, p=.1, t=4, tempo=100): print('Orchestration begins ...') if chord_track is not None: if len(acc_piano) > len(chord_track): chord_track = np.pad(chord_track, ((0, 0), (len(acc_piano)-len(chord_track)))) else: chord_track = chord_track[:len(acc_piano)] acc_piano = np.max(np.stack([acc_piano, chord_track], axis=0), axis=0) mix = torch.from_numpy(np.array([pr2grid(matrix, max_note_count=32) for matrix in acc_piano])).to(DEVICE) r_pos = np.round(np.arange(0, len(mix), 1) / (len(mix)-1) * len(REL_POS_BIN))
total_len = np.argmin(np.abs(TOTAL_LEN_BIN - len(mix))).repeat(len(mix))
17
2023-10-23 12:36:57+00:00
24k
liuqidong07/MOELoRA-peft
src/MLoRA/peft/peft_model.py
[ { "identifier": "PeftConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.\n inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.\n \"\"\"\n\n base_model_name_or_path: str = field(default=None, metadata={\"help\": \"The name of the base model to use.\"})\n peft_type: Union[str, PeftType] = field(default=None, metadata={\"help\": \"Peft type\"})\n task_type: Union[str, TaskType] = field(default=None, metadata={\"help\": \"Task type\"})\n inference_mode: bool = field(default=False, metadata={\"help\": \"Whether to use inference mode\"})" }, { "identifier": "Gate", "path": "src/MLoRA/peft/shared.py", "snippet": "class Gate(nn.Module):\n \"\"\"Gate\"\"\"\n def __init__(self, peft_config: PeftConfig, adapter_name=\"default\"):\n\n super().__init__()\n\n self.expert_num = peft_config.expert_num\n self.task_num = peft_config.task_num\n self.te_dim = peft_config.task_embedding_dim\n\n #self.lora_task_embedding = nn.Embedding(self.task_num+1, self.te_dim)# 使用embedding来代替线性层\n self.GateL = nn.Linear(self.te_dim, self.expert_num, bias=False)\n self.act = nn.Softmax(dim=1) # 第0维为batch size\n \n def forward(self, task_em):\n\n #task_em = self.lora_task_embedding(x)\n y = self.GateL(task_em)\n y = self.act(y)\n\n return y" }, { "identifier": "GateN", "path": "src/MLoRA/peft/shared.py", "snippet": "class GateN(nn.Module):\n \"\"\"Gate New Function\"\"\"\n def __init__(self, expert_num, task_embedding_dim):\n\n super().__init__()\n\n self.expert_num = expert_num\n self.te_dim = task_embedding_dim\n\n self.GateL = nn.Linear(self.te_dim, self.expert_num, bias=False)\n self.act = nn.Softmax(dim=1) # 第0维为batch size\n \n def forward(self, task_em):\n\n #task_em = self.lora_task_embedding(x)\n y = self.GateL(task_em)\n y = self.act(y)\n\n return y" }, { "identifier": "AdaptionPromptModel", "path": "src/MLoRA/peft/tuners/adaption_prompt.py", "snippet": "class AdaptionPromptModel(nn.Module):\n \"\"\"\n Implements adaption prompts as described in https://arxiv.org/pdf/2303.16199.pdf.\n\n The top L attention modules are replaced with AdaptedAttention modules that wrap the original ones, but insert\n trainable prompts with gates (for zero init).\n\n Notes on the multi-adapter pattern:\n - We store the states of different adapters by keeping a dictionary of AdaptedAttention modules indexed by adapter\n name.\n - Every time we switch adapters, we remove the modules of the currently active adapter from the model, store them\n in the dictionary, and replace them with the modules of the new adapter.\n - To avoid duplicated and potentially inconsistent state, the currently active adapter is always removed from the\n dictionary.\n - Disabling the adapter would also result in the modules being removed from the model.\n \"\"\"\n\n def __init__(self, model, configs: Dict, adapter_name: str):\n super().__init__()\n self.model = model\n # Store adapter configs by name.\n self._configs: Dict[str, AdaptionPromptConfig] = {}\n # Store lists of the parents of the affected attention modules by adapter name.\n # We keep references to the parents so we can swap the adapters in-and-out of the model.\n self._parents: Dict[str, List[nn.Module]] = {}\n # Store lists of 
cached AdaptedAttention modules by name.\n self._cached_adapters: Dict[str, List] = {}\n # The name of the currently active adapter.\n self._active_adapter = None\n # Whether the adapter is enabled.\n self._enabled = True\n self.forward = self.model.forward\n self.add_adapter(adapter_name, configs[adapter_name])\n self._mark_only_adaption_prompts_as_trainable()\n\n def add_adapter(self, adapter_name: str, config: AdaptionPromptConfig) -> None:\n \"\"\"Add an adapter with the given name and config.\"\"\"\n config = prepare_config(config, self.model)\n if adapter_name in self._configs:\n raise ValueError(f\"Adapter with name '{adapter_name}' already exists.\")\n\n parents = []\n for name, _ in self.model.named_modules():\n if name.endswith(config.target_modules):\n par, _, _ = _get_submodules(self.model, name)\n parents.append(par)\n if len(parents) < config.adapter_layers:\n raise ValueError(\n f\"Config specifies more adapter layers '{config.adapter_layers}'\"\n f\" than the model has '{len(parents)}'.\"\n )\n # Note that if the target modules are not in Sequential, ModuleList, or\n # some other PyTorch ordered container, the behavior is undefined as we\n # assume here that the order of the modules is the same as the order of\n # the transformer decoder layers.\n parents = parents[-config.adapter_layers :]\n self._parents[adapter_name] = parents\n\n # It is only None during initialization.\n # If it is disabled, we don't have to remove the modules.\n if self._active_adapter is not None and self._enabled:\n self._remove_adapted_attentions(self._active_adapter)\n self._active_adapter = adapter_name\n self._configs[adapter_name] = config\n self._create_adapted_attentions(config, parents)\n if not self._enabled:\n self._remove_adapted_attentions(self._active_adapter)\n\n if config.inference_mode:\n _freeze_adapter(self.model, adapter_name)\n\n def set_adapter(self, adapter_name: str) -> None:\n \"\"\"Set the model to use the adapter with the given name.\"\"\"\n if self._active_adapter == adapter_name:\n return\n if adapter_name not in self._configs:\n raise ValueError(f\"Adapter with name '{adapter_name}' does not exist.\")\n\n if self._enabled:\n self._remove_adapted_attentions(self._active_adapter)\n self._set_adapted_attentions(adapter_name)\n\n self._active_adapter = adapter_name\n\n def enable_adapter_layers(self):\n \"\"\"Enable adapter layers by swapping in cached AdaptedAttention modules.\"\"\"\n self._enabled = True\n self._set_adapted_attentions(self._active_adapter)\n\n def disable_adapter_layers(self):\n \"\"\"Disable adapter layers by swapping out AdaptedAttention modules.\"\"\"\n self._enabled = False\n self._remove_adapted_attentions(self._active_adapter)\n\n def _create_adapted_attentions(self, config: AdaptionPromptConfig, parents: List[nn.Module]) -> None:\n \"\"\"Wrap LlamaAttention modules with newly created AdaptedAttention modules.\"\"\"\n for par in parents:\n attn = AdaptedAttention(\n model_type=self.model.config.model_type,\n adapter_len=config.adapter_len,\n model=getattr(par, config.target_modules),\n )\n setattr(par, config.target_modules, attn)\n\n def _set_adapted_attentions(self, adapter_name: str) -> None:\n \"\"\"Replace LlamaAttention modules with cached AdaptedAttention modules.\"\"\"\n cached = self._cached_adapters[adapter_name]\n del self._cached_adapters[adapter_name]\n config = self._configs[adapter_name]\n for i, par in enumerate(self._parents[adapter_name]):\n setattr(par, config.target_modules, cached[i])\n\n def _remove_adapted_attentions(self, 
adapter_name: str) -> None:\n \"\"\"Remove AdaptedAttention modules from the model and store them in the cache.\"\"\"\n config = self._configs[adapter_name]\n adapted_attentions = []\n for par in self._parents[adapter_name]:\n attn = getattr(par, config.target_modules)\n adapted_attentions.append(attn)\n setattr(par, config.target_modules, attn.model)\n self._cached_adapters[adapter_name] = adapted_attentions\n\n def _mark_only_adaption_prompts_as_trainable(self) -> None:\n \"\"\"Freeze all parameters of the model except the adaption prompts.\"\"\"\n for n, p in self.model.named_parameters():\n if not is_adaption_prompt_trainable(n):\n p.requires_grad = False\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n # This is necessary as e.g. causal models have various methods that we\n # don't want to re-implement here.\n return getattr(self.model, name)" }, { "identifier": "LoraModel", "path": "src/MLoRA/peft/tuners/lora.py", "snippet": "class LoraModel(torch.nn.Module):\n \"\"\"\n Creates Low Rank Adapter (Lora) model from a pretrained transformers model.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): The model to be adapted.\n config ([`LoraConfig`]): The configuration of the Lora model.\n\n Returns:\n `torch.nn.Module`: The Lora model.\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig\n >>> from peft import LoraModel, LoraConfig\n\n >>> config = LoraConfig(\n ... peft_type=\"LORA\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... r=8,\n ... lora_alpha=32,\n ... target_modules=[\"q\", \"v\"],\n ... lora_dropout=0.01,\n ... )\n\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\")\n >>> lora_model = LoraModel(config, model)\n ```\n\n **Attributes**:\n - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.\n - **peft_config** ([`LoraConfig`]): The configuration of the Lora model.\n \"\"\"\n\n def __init__(self, model, config, adapter_name):\n super().__init__()\n self.model = model\n self.forward = self.model.forward\n self.peft_config = config\n self.add_adapter(adapter_name, self.peft_config[adapter_name])\n\n def add_adapter(self, adapter_name, config=None):\n if config is not None:\n model_config = self.model.config.to_dict() if hasattr(self.model.config, \"to_dict\") else self.model.config\n config = self._prepare_lora_config(config, model_config)\n self.peft_config[adapter_name] = config\n self._find_and_replace(adapter_name)\n if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != \"none\":\n raise ValueError(\n \"LoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.\"\n )\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias) # freeze all layers except for lora layer\n if self.peft_config[adapter_name].inference_mode: # if inference, also freeze lora layer\n _freeze_adapter(self.model, adapter_name)\n\n def _find_and_replace(self, adapter_name):\n \"\"\"Replace the target `Linear` module with LoRA layer (Linear+LoRA)\"\"\"\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. 
\"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n }\n key_list = [key for key, _ in self.model.named_modules()]\n for key in key_list:\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key) # parent: the parent mudle of target (e.g., SelfAttention), target: target module (e.g., nn.Linear()), target name: the name of target module (e.g., query_key_value)\n bias = target.bias is not None\n if isinstance(target, LoraLayer): # if the target is LoraLayer, only need to update the parameters\n target.update_layer(\n adapter_name,\n lora_config.r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else: # if not, get the lora parameter for create.\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n eightbit_kwargs = kwargs.copy()\n eightbit_kwargs.update(\n {\n \"has_fp16_weights\": target.state.has_fp16_weights,\n \"memory_efficient_backward\": target.state.memory_efficient_backward,\n \"threshold\": target.state.threshold,\n \"index\": target.index,\n }\n )\n new_module = Linear8bitLt(\n adapter_name, target.in_features, target.out_features, bias=bias, **eightbit_kwargs\n )\n else: # create based on the original module type\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = Linear(adapter_name, in_features, out_features, bias=bias, **kwargs) # create the lora module, here is not the raw nn.Linear, but the lora layer\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )\n\n def _replace_module(self, parent_module, child_name, new_module, old_module):\n \"\"\"substitute the original nn.Linear to new Linear (nn.Linear+LoRA block)\"\"\"\n setattr(parent_module, child_name, new_module)\n new_module.weight = old_module.weight\n if old_module.bias is not None:\n new_module.bias = old_module.bias\n if getattr(old_module, \"state\", None) is not None: # synchronize the state and device\n new_module.state = old_module.state\n new_module.to(old_module.weight.device)\n\n # dispatch to correct device\n for name, module in new_module.named_modules():\n if \"lora_\" in name:\n module.to(old_module.weight.device)\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.model, name)\n\n def get_peft_config_as_dict(self, inference: bool = False):\n config_dict = {}\n for key, value in self.peft_config.items():\n config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}\n if inference:\n config[\"inference_mode\"] = True\n config_dict[key] = config\n return config\n\n def _set_adapter_layers(self, enabled=True):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.disable_adapters = False if enabled else True\n\n def enable_adapter_layers(self):\n self._set_adapter_layers(enabled=True)\n\n def disable_adapter_layers(self):\n self._set_adapter_layers(enabled=False)\n\n def set_adapter(self, adapter_name):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n if module.merged:\n warnings.warn(\"Adapter cannot be set when the model is merged. Unmerging the model first.\")\n module.unmerge()\n module.active_adapter = adapter_name\n\n def merge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.merge()\n\n def unmerge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.unmerge()\n\n @staticmethod\n def _prepare_lora_config(peft_config, model_config):\n if peft_config.target_modules is None:\n if model_config[\"model_type\"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:\n raise ValueError(\"Please specify `target_modules` in `peft_config`\")\n peft_config.target_modules = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config[\"model_type\"]]\n if peft_config.inference_mode:\n peft_config.merge_weights = True\n return peft_config\n\n def merge_and_unload(self):\n r\"\"\"\n This method merges the LoRa layers into the base model. 
This is needed if someone wants to use the base model\n as a standalone model.\n \"\"\"\n if getattr(self.config, \"model_type\", None) == \"gpt2\":\n raise ValueError(\"GPT2 models are not supported for merging LORA layers\")\n\n if getattr(self.model, \"is_loaded_in_8bit\", False):\n raise ValueError(\"Cannot merge LORA layers when the model is loaded in 8-bit mode\")\n\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n try:\n parent, target, target_name = _get_submodules(self.model, key)\n except AttributeError:\n continue\n if isinstance(target, LoraLayer):\n bias = target.bias is not None\n new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)\n target.merge()\n self._replace_module(parent, target_name, new_module, target)\n\n # save any additional trainable modules part of `modules_to_save`\n if isinstance(target, ModulesToSaveWrapper):\n setattr(parent, target_name, target.modules_to_save[target.active_adapter])\n\n return self.model\n\n def add_weighted_adapter(self, adapters, weights, adapter_name):\n if len({self.peft_config[adapter].r for adapter in adapters}) != 1:\n raise ValueError(\"All adapters must have the same r value\")\n self.peft_config[adapter_name] = self.peft_config[adapters[0]]\n self.peft_config[adapter_name].lora_alpha = self.peft_config[adapters[0]].r\n self._find_and_replace(adapter_name)\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n _freeze_adapter(self.model, adapter_name)\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n _, target, _ = _get_submodules(self.model, key)\n if isinstance(target, LoraLayer):\n target.lora_A[adapter_name].weight.data = target.lora_A[adapter_name].weight.data * 0.0\n target.lora_B[adapter_name].weight.data = target.lora_B[adapter_name].weight.data * 0.0\n for adapter, weight in zip(adapters, weights):\n if adapter not in target.lora_A:\n continue\n target.lora_A[adapter_name].weight.data += (\n target.lora_A[adapter].weight.data * weight * target.scaling[adapter]\n )\n target.lora_B[adapter_name].weight.data += target.lora_B[adapter].weight.data * weight" }, { "identifier": "AdaLoraModel", "path": "src/MLoRA/peft/tuners/adalora.py", "snippet": "class AdaLoraModel(LoraModel):\n \"\"\"\n Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. 
Paper:\n https://openreview.net/pdf?id=lq62uWRJjiY\n\n Args:\n model ([`transformers.PreTrainedModel`]): The model to be adapted.\n config ([`AdaLoraConfig`]): The configuration of the AdaLora model.\n\n Returns:\n `torch.nn.Module`: The AdaLora model.\n\n Example::\n\n >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig\n >>> config = AdaLoraConfig(\n peft_type=\"ADALORA\", task_type=\"SEQ_2_SEQ_LM\", r=8, lora_alpha=32, target_modules=[\"q\", \"v\"],\n lora_dropout=0.01,\n )\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\") >>> model = AdaLoraModel(config, model)\n\n **Attributes**:\n - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.\n - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.\n \"\"\"\n\n def __init__(self, model, config, adapter_name):\n nn.Module.__init__(self)\n self.model = model\n self.peft_config = config\n self.add_adapter(adapter_name, self.peft_config[adapter_name])\n\n def add_adapter(self, adapter_name, config=None):\n if config is not None:\n model_config = self.model.config.to_dict() if hasattr(self.model.config, \"to_dict\") else self.model.config\n config = self._prepare_adalora_config(config, model_config)\n self.peft_config[adapter_name] = config\n self._find_and_replace(adapter_name)\n if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != \"none\":\n raise ValueError(\n \"AdaLoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.\"\n )\n traininable_mode_counter = 0\n for config in self.peft_config.values():\n if not config.inference_mode:\n traininable_mode_counter += 1\n\n if traininable_mode_counter > 1:\n raise ValueError(\n \"AdaLoraModel supports only 1 trainable adapter. \"\n \"When using multiple adapters, set inference_mode to True for all adapters except the one you want to train.\"\n )\n\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n if self.peft_config[adapter_name].inference_mode:\n _freeze_adapter(self.model, adapter_name)\n else:\n self.trainable_adapter_name = adapter_name\n self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)\n\n def _find_and_replace(self, adapter_name):\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. 
\"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.init_r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n }\n key_list = [key for key, _ in self.model.named_modules()]\n for key in key_list:\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key)\n bias = target.bias is not None\n if isinstance(target, LoraLayer):\n target.update_layer(\n adapter_name,\n lora_config.init_r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else:\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n kwargs.update(\n {\n \"has_fp16_weights\": target.state.has_fp16_weights,\n \"memory_efficient_backward\": target.state.memory_efficient_backward,\n \"threshold\": target.state.threshold,\n \"index\": target.index,\n }\n )\n new_module = SVDLinear8bitLt(\n adapter_name, target.in_features, target.out_features, bias=bias, **kwargs\n )\n else:\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = SVDLinear(adapter_name, in_features, out_features, bias=bias, **kwargs)\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.model, name)\n\n def forward(self, *args, **kwargs):\n outputs = self.model.forward(*args, **kwargs)\n\n # Calculate the orthogonal regularization\n orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight\n assert orth_reg_weight > 0\n\n if hasattr(outputs, \"loss\"):\n regu_loss = 0\n num_param = 0\n for n, p in self.model.named_parameters():\n if (\"lora_A\" in n or \"lora_B\" in n) and self.trainable_adapter_name in n:\n para_cov = p @ p.T if \"lora_A\" in n else p.T @ p\n I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))\n I.requires_grad = False\n num_param += 1\n regu_loss += torch.norm(para_cov - I, p=\"fro\")\n regu_loss = regu_loss / num_param\n outputs.loss += orth_reg_weight * regu_loss\n return outputs\n\n def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):\n lora_config = self.peft_config[adapter_name]\n for name, rank_idx in rank_pattern.items():\n if isinstance(rank_idx, list):\n rank = sum(rank_idx)\n elif isinstance(rank_idx, torch.Tensor):\n rank_idx = rank_idx.view(-1)\n rank = rank_idx.sum().item()\n else:\n raise ValueError(\"Unexcepted type of rank_idx\")\n key = \".\".join(name.split(\".\")[0:-2]) if adapter_name in name else \".\".join(name.split(\".\")[0:-1])\n _, target, _ = _get_submodules(self.model, key)\n lora_E_weights = target.lora_E[adapter_name][rank_idx]\n lora_A_weights = target.lora_A[adapter_name][rank_idx]\n lora_B_weights = target.lora_B[adapter_name][:, rank_idx]\n ranknum = target.ranknum[adapter_name]\n target.update_layer(\n adapter_name,\n rank,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n with torch.no_grad():\n if rank > 0:\n target.lora_E[adapter_name].copy_(lora_E_weights)\n target.lora_A[adapter_name].copy_(lora_A_weights)\n target.lora_B[adapter_name].copy_(lora_B_weights)\n # The scaling is exactly as the previous\n target.ranknum[adapter_name].copy_(ranknum)\n\n def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):\n for name, rank_idx in rank_pattern.items():\n rank = sum(rank_idx)\n prefix = \".\".join(name.split(\".\")[0:-2]) if adapter_name in name else \".\".join(name.split(\".\")[0:-1])\n for layer in [\"lora_E\", \"lora_A\", \"lora_B\"]:\n key = f\"base_model.model.{prefix}.{layer}.{adapter_name}\"\n if layer != \"lora_B\":\n state_dict[key] = (\n state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]\n )\n else:\n state_dict[key] = (\n state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]\n )\n return state_dict\n\n def update_and_allocate(self, global_step):\n lora_config = self.peft_config[self.trainable_adapter_name]\n # Update the importance score and allocate the budget\n if global_step < lora_config.total_step - lora_config.tfinal:\n _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)\n if rank_pattern:\n lora_config.rank_pattern = rank_pattern\n # Finalize the budget allocation\n elif global_step == lora_config.total_step - lora_config.tfinal:\n _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)\n # for some reason, this freezes the trainable parameters and nothing gets updates\n 
# self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)\n lora_config.rank_pattern = rank_pattern\n self.rankallocator.reset_ipt()\n # Currently using inefficient way to mask the unimportant weights using the rank pattern\n # due to problem mentioned above\n elif global_step > lora_config.total_step - lora_config.tfinal:\n self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)\n # Pass the function and do forward propagation\n else:\n return None\n\n @staticmethod\n def _prepare_adalora_config(peft_config, model_config):\n if peft_config.target_modules is None:\n if model_config[\"model_type\"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:\n raise ValueError(\"Please specify `target_modules` in `peft_config`\")\n peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[\n model_config[\"model_type\"]\n ]\n if peft_config.inference_mode:\n peft_config.merge_weights = True\n return peft_config" }, { "identifier": "PromptEncoder", "path": "src/MLoRA/peft/tuners/p_tuning.py", "snippet": "class PromptEncoder(torch.nn.Module):\n \"\"\"\n The prompt encoder network that is used to generate the virtual token embeddings for p-tuning.\n\n Args:\n config ([`PromptEncoderConfig`]): The configuration of the prompt encoder.\n\n Example:\n\n ```py\n >>> from peft import PromptEncoder, PromptEncoderConfig\n\n >>> config = PromptEncoderConfig(\n ... peft_type=\"P_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... encoder_reparameterization_type=\"MLP\",\n ... encoder_hidden_size=768,\n ... )\n\n >>> prompt_encoder = PromptEncoder(config)\n ```\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder.\n - **mlp_head** (`torch.nn.Sequential`) -- The MLP head of the prompt encoder if `inference_mode=False`.\n - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head of the prompt encoder if `inference_mode=False` and\n `encoder_reparameterization_type=\"LSTM\"`.\n - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model.\n - **input_size** (`int`) -- The input size of the prompt encoder.\n - **output_size** (`int`) -- The output size of the prompt encoder.\n - **hidden_size** (`int`) -- The hidden size of the prompt encoder.\n - **total_virtual_tokens** (`int`): The total number of virtual tokens of the\n prompt encoder.\n - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]): The encoder type of the prompt\n encoder.\n\n\n Input shape: (`batch_size`, `total_virtual_tokens`)\n\n Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.token_dim = config.token_dim\n self.input_size = self.token_dim\n self.output_size = self.token_dim\n self.hidden_size = config.encoder_hidden_size\n self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules\n self.encoder_type = config.encoder_reparameterization_type\n\n # embedding\n self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)\n if not config.inference_mode:\n if self.encoder_type == PromptEncoderReparameterizationType.LSTM:\n lstm_dropout = config.encoder_dropout\n num_layers = config.encoder_num_layers\n # LSTM\n self.lstm_head = torch.nn.LSTM(\n input_size=self.input_size,\n hidden_size=self.hidden_size,\n 
num_layers=num_layers,\n dropout=lstm_dropout,\n bidirectional=True,\n batch_first=True,\n )\n\n self.mlp_head = torch.nn.Sequential(\n torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size * 2, self.output_size),\n )\n\n elif self.encoder_type == PromptEncoderReparameterizationType.MLP:\n warnings.warn(\n f\"for {self.encoder_type}, the `encoder_num_layers` is ignored. Exactly 2 MLP layers are used.\"\n )\n layers = [\n torch.nn.Linear(self.input_size, self.hidden_size),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size, self.hidden_size),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size, self.output_size),\n ]\n self.mlp_head = torch.nn.Sequential(*layers)\n\n else:\n raise ValueError(\"Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.\")\n\n def forward(self, indices):\n input_embeds = self.embedding(indices)\n if self.encoder_type == PromptEncoderReparameterizationType.LSTM:\n output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0])\n elif self.encoder_type == PromptEncoderReparameterizationType.MLP:\n output_embeds = self.mlp_head(input_embeds)\n else:\n raise ValueError(\"Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.\")\n\n return output_embeds" }, { "identifier": "PrefixEncoder", "path": "src/MLoRA/peft/tuners/prefix_tuning.py", "snippet": "class PrefixEncoder(torch.nn.Module):\n r\"\"\"\n The `torch.nn` model to encode the prefix.\n\n Args:\n config ([`PrefixTuningConfig`]): The configuration of the prefix encoder.\n\n Example:\n\n ```py\n >>> from peft import PrefixEncoder, PrefixTuningConfig\n\n >>> config = PrefixTuningConfig(\n ... peft_type=\"PREFIX_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... encoder_hidden_size=768,\n ... 
)\n >>> prefix_encoder = PrefixEncoder(config)\n ```\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder.\n - **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if\n `prefix_projection` is `True`.\n - **prefix_projection** (`bool`) -- Whether to project the prefix embeddings.\n\n Input shape: (`batch_size`, `num_virtual_tokens`)\n\n Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`)\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.prefix_projection = config.prefix_projection\n token_dim = config.token_dim\n num_layers = config.num_layers\n encoder_hidden_size = config.encoder_hidden_size\n num_virtual_tokens = config.num_virtual_tokens\n if self.prefix_projection and not config.inference_mode:\n # Use a two-layer MLP to encode the prefix\n self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim)\n self.transform = torch.nn.Sequential(\n torch.nn.Linear(token_dim, encoder_hidden_size),\n torch.nn.Tanh(),\n torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim),\n )\n else:\n self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim)\n\n def forward(self, prefix: torch.Tensor):\n if self.prefix_projection:\n prefix_tokens = self.embedding(prefix)\n past_key_values = self.transform(prefix_tokens)\n else:\n past_key_values = self.embedding(prefix)\n return past_key_values" }, { "identifier": "PromptEmbedding", "path": "src/MLoRA/peft/tuners/prompt_tuning.py", "snippet": "class PromptEmbedding(torch.nn.Module):\n \"\"\"\n The model to encode virtual tokens into prompt embeddings.\n\n Args:\n config ([`PromptTuningConfig`]): The configuration of the prompt embedding.\n word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model.\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding.\n\n Example:\n\n ```py\n >>> from peft import PromptEmbedding, PromptTuningConfig\n\n >>> config = PromptTuningConfig(\n ... peft_type=\"PROMPT_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... prompt_tuning_init=\"TEXT\",\n ... prompt_tuning_init_text=\"Predict if sentiment of this review is positive, negative or neutral\",\n ... tokenizer_name_or_path=\"t5-base\",\n ... 
)\n\n >>> # t5_model.shared is the word embeddings of the base model\n >>> prompt_embedding = PromptEmbedding(config, t5_model.shared)\n ```\n\n Input Shape: (`batch_size`, `total_virtual_tokens`)\n\n Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)\n \"\"\"\n\n def __init__(self, config, word_embeddings):\n super().__init__()\n\n total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules\n self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim)\n if config.prompt_tuning_init == PromptTuningInit.TEXT:\n from transformers import AutoTokenizer\n\n tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path)\n init_text = config.prompt_tuning_init_text\n init_token_ids = tokenizer(init_text)[\"input_ids\"]\n # Trim or iterate until num_text_tokens matches total_virtual_tokens\n num_text_tokens = len(init_token_ids)\n if num_text_tokens > total_virtual_tokens:\n init_token_ids = init_token_ids[:total_virtual_tokens]\n elif num_text_tokens < total_virtual_tokens:\n num_reps = math.ceil(total_virtual_tokens / num_text_tokens)\n init_token_ids = init_token_ids * num_reps\n init_token_ids = init_token_ids[:total_virtual_tokens]\n\n word_embedding_weights = word_embeddings(torch.LongTensor(init_token_ids)).detach().clone()\n word_embedding_weights = word_embedding_weights.to(torch.float32)\n self.embedding.weight = torch.nn.Parameter(word_embedding_weights)\n\n def forward(self, indices):\n # Just get embeddings\n prompt_embeddings = self.embedding(indices)\n return prompt_embeddings" }, { "identifier": "MMOELoraModelS", "path": "src/MLoRA/peft/tuners/mmoeloraS.py", "snippet": "class MMOELoraModelS(MMOELoraModel):\n\n def __init__(self, model, config, adapter_name):\n\n super().__init__(model, config, adapter_name)\n\n\n\n def _find_and_replace(self, adapter_name):\n \"\"\"Replace the target `Linear` module with LoRA layer (Linear+LoRA)\"\"\"\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. \"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n \"task_num\": lora_config.task_num,\n \"task_embedding_dim\": lora_config.task_embedding_dim,\n \"expert_num\": lora_config.expert_num,\n }\n key_list = [key for key, _ in self.model.named_modules()] # all module in raw model\n for key in key_list:\n # find the corresponding modules. 
target module has been split into list.\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key)\n bias = target.bias is not None\n if isinstance(target, MMOELoraLayer):\n target.update_layer(\n adapter_name,\n lora_config.init_r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else:\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n raise NotImplementedError\n else:\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = MMOELoraLinearS(adapter_name, in_features, out_features, \n bias=bias, **kwargs)\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )" }, { "identifier": "PeftConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.\n inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.\n \"\"\"\n\n base_model_name_or_path: str = field(default=None, metadata={\"help\": \"The name of the base model to use.\"})\n peft_type: Union[str, PeftType] = field(default=None, metadata={\"help\": \"Peft type\"})\n task_type: Union[str, TaskType] = field(default=None, metadata={\"help\": \"Task type\"})\n inference_mode: bool = field(default=False, metadata={\"help\": \"Whether to use inference mode\"})" }, { "identifier": "PeftType", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"\n ADAPTION_PROMPT = \"ADAPTION_PROMPT\"\n MMOELORAS = \"MMOELORAS\"" }, { "identifier": "PromptLearningConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n \"\"\"\n\n num_virtual_tokens: int = field(default=None, metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})" }, { "identifier": "TaskType", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class TaskType(str, enum.Enum):\n SEQ_CLS = \"SEQ_CLS\"\n SEQ_2_SEQ_LM = \"SEQ_2_SEQ_LM\"\n CAUSAL_LM = \"CAUSAL_LM\"\n TOKEN_CLS = \"TOKEN_CLS\"\n CAUSAL_LMS = \"CAUSAL_LMS\"" }, { "identifier": "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING", "path": "src/MLoRA/peft/utils/other.py", "snippet": "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING = {\n \"bloom\": bloom_model_postprocess_past_key_value,\n}" }, { "identifier": "WEIGHTS_NAME", "path": "src/MLoRA/peft/utils/other.py", "snippet": "WEIGHTS_NAME = \"adapter_model.bin\"" }, { "identifier": "_set_trainable", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def _set_trainable(model, adapter_name):\n key_list = [key for key, _ in model.named_modules()]\n for key in key_list:\n target_module_found = any(key.endswith(target_key) for target_key in model.modules_to_save)\n if 
target_module_found:\n parent, target, target_name = _get_submodules(model, key)\n if isinstance(target, ModulesToSaveWrapper):\n target.update(adapter_name)\n else:\n for param in target.parameters():\n param.requires_grad = True\n setattr(parent, target_name, ModulesToSaveWrapper(target, adapter_name))" }, { "identifier": "shift_tokens_right", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):\n \"\"\"\n Shift input ids one token to the right.\n\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids\n pad_token_id (`int`): The id of the `padding` token.\n decoder_start_token_id (`int`): The id of the `start` token.\n \"\"\"\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()\n shifted_input_ids[:, 0] = decoder_start_token_id\n\n if pad_token_id is None:\n raise ValueError(\"self.model.config.pad_token_id has to be defined.\")\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n return shifted_input_ids" }, { "identifier": "_set_adapter", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def _set_adapter(model, adapter_name):\n for module in model.modules():\n if isinstance(module, ModulesToSaveWrapper):\n module.active_adapter = adapter_name" }, { "identifier": "get_peft_model_state_dict", "path": "src/MLoRA/peft/utils/save_and_load.py", "snippet": "def get_peft_model_state_dict(model, state_dict=None, adapter_name=\"default\"):\n \"\"\"\n Get the state dict of the Peft model.\n\n Args:\n model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP,\n the model should be the underlying model/unwrapped model (i.e. model.module).\n state_dict (`dict`, *optional*, defaults to `None`):\n The state dict of the model. 
If not provided, the state dict of the model\n will be used.\n \"\"\"\n config = model.peft_config[adapter_name]\n if state_dict is None:\n state_dict = model.state_dict()\n if config.peft_type in (PeftType.LORA, PeftType.ADALORA,\n PeftType.MMOELORAS):\n # to_return = lora_state_dict(model, bias=model.peft_config.bias)\n # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py`\n # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP\n bias = config.bias\n if bias == \"none\": # filter out all lora parameters\n to_return = {k: state_dict[k] for k in state_dict if \"lora_\" in k}\n elif bias == \"all\":\n to_return = {k: state_dict[k] for k in state_dict if \"lora_\" in k or \"bias\" in k}\n elif bias == \"lora_only\":\n to_return = {}\n for k in state_dict:\n if \"lora_\" in k:\n to_return[k] = state_dict[k]\n bias_name = k.split(\"lora_\")[0] + \"bias\"\n if bias_name in state_dict:\n to_return[bias_name] = state_dict[bias_name]\n else:\n raise NotImplementedError\n to_return = {k: v for k, v in to_return.items() if ((\"lora_\" in k and adapter_name in k) or (\"bias\" in k))}\n\n if config.peft_type == PeftType.ADALORA:\n rank_pattern = config.rank_pattern\n if rank_pattern is not None:\n rank_pattern = {k.replace(f\".{adapter_name}\", \"\"): v for k, v in rank_pattern.items()}\n config.rank_pattern = rank_pattern\n to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name)\n\n elif config.peft_type == PeftType.ADAPTION_PROMPT:\n to_return = {k: state_dict[k] for k in state_dict if k.split(\".\")[-1].startswith(\"adaption_\")}\n elif isinstance(config, PromptLearningConfig):\n to_return = {}\n if config.inference_mode:\n prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight\n else:\n prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name)\n to_return[\"prompt_embeddings\"] = prompt_embeddings\n else:\n raise NotImplementedError\n if model.modules_to_save is not None:\n for key, value in state_dict.items():\n if any(f\"{module_name}.modules_to_save.{adapter_name}\" in key for module_name in model.modules_to_save):\n to_return[key.replace(\"modules_to_save.\", \"\")] = value\n\n to_return = {k.replace(f\".{adapter_name}\", \"\"): v for k, v in to_return.items()}\n return to_return" }, { "identifier": "set_peft_model_state_dict", "path": "src/MLoRA/peft/utils/save_and_load.py", "snippet": "def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name=\"default\"):\n \"\"\"\n Set the state dict of the Peft model.\n\n Args:\n model ([`PeftModel`]): The Peft model.\n peft_model_state_dict (`dict`): The state dict of the Peft model.\n \"\"\"\n config = model.peft_config[adapter_name]\n state_dict = {}\n if model.modules_to_save is not None:\n for key, value in peft_model_state_dict.items():\n if any(module_name in key for module_name in model.modules_to_save):\n for module_name in model.modules_to_save:\n if module_name in key:\n key = key.replace(module_name, f\"{module_name}.modules_to_save.{adapter_name}\")\n break\n state_dict[key] = value\n else:\n state_dict = peft_model_state_dict\n\n if config.peft_type in (PeftType.LORA, PeftType.ADALORA,\n PeftType.MMOELORAS):\n peft_model_state_dict = {}\n for k, v in state_dict.items():\n if \"lora_\" in k:\n suffix = k.split(\"lora_\")[1]\n if \".\" in suffix:\n suffix_to_replace = \".\".join(suffix.split(\".\")[1:])\n k = k.replace(suffix_to_replace, f\"{adapter_name}.{suffix_to_replace}\")\n else:\n k = 
f\"{k}.{adapter_name}\"\n peft_model_state_dict[k] = v\n else:\n peft_model_state_dict[k] = v\n if config.peft_type == PeftType.ADALORA:\n rank_pattern = config.rank_pattern\n if rank_pattern is not None:\n model.resize_modules_by_rank_pattern(rank_pattern, adapter_name)\n elif isinstance(config, PromptLearningConfig) or config.peft_type == PeftType.ADAPTION_PROMPT:\n peft_model_state_dict = state_dict\n else:\n raise NotImplementedError\n\n model.load_state_dict(peft_model_state_dict, strict=False)\n if isinstance(config, PromptLearningConfig):\n model.prompt_encoder[adapter_name].embedding.load_state_dict(\n {\"weight\": peft_model_state_dict[\"prompt_embeddings\"]}, strict=True\n )" } ]
import inspect import os import warnings import torch import torch.nn as nn from contextlib import contextmanager from accelerate import dispatch_model, infer_auto_device_map from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules from accelerate.utils import get_balanced_memory from huggingface_hub import hf_hub_download from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers import PreTrainedModel from transformers.modeling_outputs import SequenceClassifierOutput, TokenClassifierOutput from transformers.utils import PushToHubMixin from .utils import PeftConfig from .shared import Gate, GateN from .tuners import ( AdaLoraModel, AdaptionPromptModel, LoraModel, PrefixEncoder, PromptEmbedding, PromptEncoder, MMOELoraModelS, ) from .utils import ( TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, PeftConfig, PeftType, PromptLearningConfig, TaskType, _set_adapter, _set_trainable, get_peft_model_state_dict, set_peft_model_state_dict, shift_tokens_right, ) from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
15187
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder, PeftType.ADALORA: AdaLoraModel, PeftType.ADAPTION_PROMPT: AdaptionPromptModel, PeftType.MMOELORAS: MMOELoraModelS, } class PeftModel(PushToHubMixin, torch.nn.Module): """ Base model encompassing various Peft methods. Args: model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft. peft_config ([`PeftConfig`]): The configuration of the Peft model. **Attributes**: - **base_model** ([`~transformers.PreTrainedModel`]) -- The base transformer model used for Peft. - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model. - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when saving the model. - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if using [`PromptLearningConfig`]. - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if using [`PromptLearningConfig`]. - **transformer_backbone_name** (`str`) -- The name of the transformer backbone in the base model if using [`PromptLearningConfig`]. - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone in the base model if using [`PromptLearningConfig`]. """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__() self.base_model = model self.config = self.base_model.config self.modules_to_save = None self.peft_config = {} self.active_adapter = adapter_name self.peft_type = peft_config.peft_type self.base_model_torch_dtype = getattr(model, "dtype", None) if not isinstance(peft_config, PromptLearningConfig): self.peft_config[adapter_name] = peft_config self.base_model = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type]( self.base_model, self.peft_config, adapter_name ) self.set_additional_trainable_modules(peft_config, adapter_name) else: self.add_adapter(adapter_name, peft_config) def save_pretrained(self, save_directory, **kwargs): r""" This function saves the adapter model and the adapter configuration files to a directory, so that it can be reloaded using the [`LoraModel.from_pretrained`] class method, and also used by the [`LoraModel.push_to_hub`] method. Args: save_directory (`str`): Directory where the adapter model and configuration files will be saved (will be created if it does not exist). kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the `push_to_hub` method. """ if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) for adapter_name, peft_config in self.peft_config.items(): # save only the trainable weights
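The `save_pretrained` method in the cropped code writes only the adapter weights and the adapter configuration so the checkpoint can later be re-attached to a fresh base model. A short usage sketch follows, written against the upstream `peft` package API that this vendored copy appears to follow; the model id, target modules, and output directory are illustrative assumptions, not values from the record.

from transformers import AutoModelForCausalLM
from peft import LoraConfig, PeftModel, TaskType, get_peft_model

# Illustrative base model and LoRA settings (assumptions, not from the record).
base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
config = LoraConfig(task_type=TaskType.CAUSAL_LM, r=8, lora_alpha=32,
                    lora_dropout=0.1, target_modules=["q_proj", "v_proj"])

peft_model = get_peft_model(base, config)   # wrap the base model with LoRA layers
peft_model.save_pretrained("opt125m-lora")  # writes the adapter weights (adapter_model.bin here) and adapter_config.json

# Reload: attach the saved adapter back onto a freshly loaded base model.
base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
restored = PeftModel.from_pretrained(base, "opt125m-lora")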
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder, PeftType.ADALORA: AdaLoraModel, PeftType.ADAPTION_PROMPT: AdaptionPromptModel, PeftType.MMOELORAS: MMOELoraModelS, } class PeftModel(PushToHubMixin, torch.nn.Module): """ Base model encompassing various Peft methods. Args: model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft. peft_config ([`PeftConfig`]): The configuration of the Peft model. **Attributes**: - **base_model** ([`~transformers.PreTrainedModel`]) -- The base transformer model used for Peft. - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model. - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when saving the model. - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if using [`PromptLearningConfig`]. - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if using [`PromptLearningConfig`]. - **transformer_backbone_name** (`str`) -- The name of the transformer backbone in the base model if using [`PromptLearningConfig`]. - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone in the base model if using [`PromptLearningConfig`]. """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__() self.base_model = model self.config = self.base_model.config self.modules_to_save = None self.peft_config = {} self.active_adapter = adapter_name self.peft_type = peft_config.peft_type self.base_model_torch_dtype = getattr(model, "dtype", None) if not isinstance(peft_config, PromptLearningConfig): self.peft_config[adapter_name] = peft_config self.base_model = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type]( self.base_model, self.peft_config, adapter_name ) self.set_additional_trainable_modules(peft_config, adapter_name) else: self.add_adapter(adapter_name, peft_config) def save_pretrained(self, save_directory, **kwargs): r""" This function saves the adapter model and the adapter configuration files to a directory, so that it can be reloaded using the [`LoraModel.from_pretrained`] class method, and also used by the [`LoraModel.push_to_hub`] method. Args: save_directory (`str`): Directory where the adapter model and configuration files will be saved (will be created if it does not exist). kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the `push_to_hub` method. """ if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) for adapter_name, peft_config in self.peft_config.items(): # save only the trainable weights
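`PeftModel.__init__` above dispatches on `peft_config.peft_type` through `PEFT_TYPE_TO_MODEL_MAPPING`, so each PEFT method only has to register its tuner class once. The following is a minimal stand-alone sketch of that registry pattern; the placeholder tuner class is invented for illustration and does none of the real module-rewriting work.

from enum import Enum


class PeftType(str, Enum):
    LORA = "LORA"
    PREFIX_TUNING = "PREFIX_TUNING"


class ToyLoraModel:
    """Placeholder tuner: the real LoraModel rewrites the target Linear layers."""
    def __init__(self, model, peft_config, adapter_name):
        self.model = model
        self.peft_config = peft_config
        self.adapter_name = adapter_name


# One entry per supported PEFT method, keyed by the enum member.
PEFT_TYPE_TO_MODEL_MAPPING = {PeftType.LORA: ToyLoraModel}


def wrap_base_model(base_model, peft_type, config, adapter_name="default"):
    # Same dispatch shape as PeftModel.__init__: look the tuner class up by
    # peft_type and hand it the base model plus the per-adapter config dict.
    tuner_cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_type]
    return tuner_cls(base_model, {adapter_name: config}, adapter_name)


wrapped = wrap_base_model(base_model=object(), peft_type=PeftType.LORA, config={"r": 8})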
output_state_dict = get_peft_model_state_dict(
19
2023-10-19 10:55:50+00:00
24k
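The record above pairs `cropped_code` (the file truncated just before the target line) with `next_line` (the gold completion) and `gold_snippet_index` (the entry in `context` that supports it). Below is a sketch of how a consumer might turn one such record into a completion prompt and score a prediction; the helper names are hypothetical, and the record is assumed to be loaded as a plain dict with the field names shown above.

from typing import Any, Dict


def build_completion_prompt(record: Dict[str, Any]) -> str:
    """Prepend the gold context snippet to the truncated file so a model can
    be asked to generate `next_line`."""
    gold = record["context"][record["gold_snippet_index"]]
    header = (
        f"# Retrieved from {gold['path']} ({gold['identifier']})\n"
        f"{gold['snippet']}\n\n"
    )
    return header + record["cropped_code"]


def is_exact_match(prediction: str, record: Dict[str, Any]) -> bool:
    # Whitespace-normalised exact match against the gold next line.
    return prediction.strip() == record["next_line"].strip()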
YuroFR/freqtrade-modded-crypto-trading-bot
tests/freqai/test_freqai_datakitchen.py
[ { "identifier": "TimeRange", "path": "freqtrade/configuration/timerange.py", "snippet": "class TimeRange:\n \"\"\"\n object defining timerange inputs.\n [start/stop]type defines if [start/stop]ts shall be used.\n if *type is None, don't use corresponding startvalue.\n \"\"\"\n\n def __init__(self, starttype: Optional[str] = None, stoptype: Optional[str] = None,\n startts: int = 0, stopts: int = 0):\n\n self.starttype: Optional[str] = starttype\n self.stoptype: Optional[str] = stoptype\n self.startts: int = startts\n self.stopts: int = stopts\n\n @property\n def startdt(self) -> Optional[datetime]:\n if self.startts:\n return datetime.fromtimestamp(self.startts, tz=timezone.utc)\n return None\n\n @property\n def stopdt(self) -> Optional[datetime]:\n if self.stopts:\n return datetime.fromtimestamp(self.stopts, tz=timezone.utc)\n return None\n\n @property\n def timerange_str(self) -> str:\n \"\"\"\n Returns a string representation of the timerange as used by parse_timerange.\n Follows the format yyyymmdd-yyyymmdd - leaving out the parts that are not set.\n \"\"\"\n start = ''\n stop = ''\n if startdt := self.startdt:\n start = startdt.strftime('%Y%m%d')\n if stopdt := self.stopdt:\n stop = stopdt.strftime('%Y%m%d')\n return f\"{start}-{stop}\"\n\n @property\n def start_fmt(self) -> str:\n \"\"\"\n Returns a string representation of the start date\n \"\"\"\n val = 'unbounded'\n if (startdt := self.startdt) is not None:\n val = startdt.strftime(DATETIME_PRINT_FORMAT)\n return val\n\n @property\n def stop_fmt(self) -> str:\n \"\"\"\n Returns a string representation of the stop date\n \"\"\"\n val = 'unbounded'\n if (stopdt := self.stopdt) is not None:\n val = stopdt.strftime(DATETIME_PRINT_FORMAT)\n return val\n\n def __eq__(self, other):\n \"\"\"Override the default Equals behavior\"\"\"\n return (self.starttype == other.starttype and self.stoptype == other.stoptype\n and self.startts == other.startts and self.stopts == other.stopts)\n\n def subtract_start(self, seconds: int) -> None:\n \"\"\"\n Subtracts <seconds> from startts if startts is set.\n :param seconds: Seconds to subtract from starttime\n :return: None (Modifies the object in place)\n \"\"\"\n if self.startts:\n self.startts = self.startts - seconds\n\n def adjust_start_if_necessary(self, timeframe_secs: int, startup_candles: int,\n min_date: datetime) -> None:\n \"\"\"\n Adjust startts by <startup_candles> candles.\n Applies only if no startup-candles have been available.\n :param timeframe_secs: Timeframe in seconds e.g. `timeframe_to_seconds('5m')`\n :param startup_candles: Number of candles to move start-date forward\n :param min_date: Minimum data date loaded. 
Key kriterium to decide if start-time\n has to be moved\n :return: None (Modifies the object in place)\n \"\"\"\n if (not self.starttype or (startup_candles\n and min_date.timestamp() >= self.startts)):\n # If no startts was defined, or backtest-data starts at the defined backtest-date\n logger.warning(\"Moving start-date by %s candles to account for startup time.\",\n startup_candles)\n self.startts = int(min_date.timestamp() + timeframe_secs * startup_candles)\n self.starttype = 'date'\n\n @classmethod\n def parse_timerange(cls, text: Optional[str]) -> Self:\n \"\"\"\n Parse the value of the argument --timerange to determine what is the range desired\n :param text: value from --timerange\n :return: Start and End range period\n \"\"\"\n if not text:\n return cls(None, None, 0, 0)\n syntax = [(r'^-(\\d{8})$', (None, 'date')),\n (r'^(\\d{8})-$', ('date', None)),\n (r'^(\\d{8})-(\\d{8})$', ('date', 'date')),\n (r'^-(\\d{10})$', (None, 'date')),\n (r'^(\\d{10})-$', ('date', None)),\n (r'^(\\d{10})-(\\d{10})$', ('date', 'date')),\n (r'^-(\\d{13})$', (None, 'date')),\n (r'^(\\d{13})-$', ('date', None)),\n (r'^(\\d{13})-(\\d{13})$', ('date', 'date')),\n ]\n for rex, stype in syntax:\n # Apply the regular expression to text\n match = re.match(rex, text)\n if match: # Regex has matched\n rvals = match.groups()\n index = 0\n start: int = 0\n stop: int = 0\n if stype[0]:\n starts = rvals[index]\n if stype[0] == 'date' and len(starts) == 8:\n start = int(datetime.strptime(starts, '%Y%m%d').replace(\n tzinfo=timezone.utc).timestamp())\n elif len(starts) == 13:\n start = int(starts) // 1000\n else:\n start = int(starts)\n index += 1\n if stype[1]:\n stops = rvals[index]\n if stype[1] == 'date' and len(stops) == 8:\n stop = int(datetime.strptime(stops, '%Y%m%d').replace(\n tzinfo=timezone.utc).timestamp())\n elif len(stops) == 13:\n stop = int(stops) // 1000\n else:\n stop = int(stops)\n if start > stop > 0:\n raise OperationalException(\n f'Start date is after stop date for timerange \"{text}\"')\n return cls(stype[0], stype[1], start, stop)\n raise OperationalException(f'Incorrect syntax for timerange \"{text}\"')" }, { "identifier": "DataProvider", "path": "freqtrade/data/dataprovider.py", "snippet": "class DataProvider:\n\n def __init__(\n self,\n config: Config,\n exchange: Optional[Exchange],\n pairlists=None,\n rpc: Optional[RPCManager] = None\n ) -> None:\n self._config = config\n self._exchange = exchange\n self._pairlists = pairlists\n self.__rpc = rpc\n self.__cached_pairs: Dict[PairWithTimeframe, Tuple[DataFrame, datetime]] = {}\n self.__slice_index: Optional[int] = None\n self.__slice_date: Optional[datetime] = None\n\n self.__cached_pairs_backtesting: Dict[PairWithTimeframe, DataFrame] = {}\n self.__producer_pairs_df: Dict[str,\n Dict[PairWithTimeframe, Tuple[DataFrame, datetime]]] = {}\n self.__producer_pairs: Dict[str, List[str]] = {}\n self._msg_queue: deque = deque()\n\n self._default_candle_type = self._config.get('candle_type_def', CandleType.SPOT)\n self._default_timeframe = self._config.get('timeframe', '1h')\n\n self.__msg_cache = PeriodicCache(\n maxsize=1000, ttl=timeframe_to_seconds(self._default_timeframe))\n\n self.producers = self._config.get('external_message_consumer', {}).get('producers', [])\n self.external_data_enabled = len(self.producers) > 0\n\n def _set_dataframe_max_index(self, limit_index: int):\n \"\"\"\n Limit analyzed dataframe to max specified index.\n Only relevant in backtesting.\n :param limit_index: dataframe index.\n \"\"\"\n self.__slice_index = 
limit_index\n\n def _set_dataframe_max_date(self, limit_date: datetime):\n \"\"\"\n Limit infomrative dataframe to max specified index.\n Only relevant in backtesting.\n :param limit_date: \"current date\"\n \"\"\"\n self.__slice_date = limit_date\n\n def _set_cached_df(\n self,\n pair: str,\n timeframe: str,\n dataframe: DataFrame,\n candle_type: CandleType\n ) -> None:\n \"\"\"\n Store cached Dataframe.\n Using private method as this should never be used by a user\n (but the class is exposed via `self.dp` to the strategy)\n :param pair: pair to get the data for\n :param timeframe: Timeframe to get data for\n :param dataframe: analyzed dataframe\n :param candle_type: Any of the enum CandleType (must match trading mode!)\n \"\"\"\n pair_key = (pair, timeframe, candle_type)\n self.__cached_pairs[pair_key] = (\n dataframe, datetime.now(timezone.utc))\n\n # For multiple producers we will want to merge the pairlists instead of overwriting\n def _set_producer_pairs(self, pairlist: List[str], producer_name: str = \"default\"):\n \"\"\"\n Set the pairs received to later be used.\n\n :param pairlist: List of pairs\n \"\"\"\n self.__producer_pairs[producer_name] = pairlist\n\n def get_producer_pairs(self, producer_name: str = \"default\") -> List[str]:\n \"\"\"\n Get the pairs cached from the producer\n\n :returns: List of pairs\n \"\"\"\n return self.__producer_pairs.get(producer_name, []).copy()\n\n def _emit_df(\n self,\n pair_key: PairWithTimeframe,\n dataframe: DataFrame,\n new_candle: bool\n ) -> None:\n \"\"\"\n Send this dataframe as an ANALYZED_DF message to RPC\n\n :param pair_key: PairWithTimeframe tuple\n :param dataframe: Dataframe to emit\n :param new_candle: This is a new candle\n \"\"\"\n if self.__rpc:\n msg: RPCAnalyzedDFMsg = {\n 'type': RPCMessageType.ANALYZED_DF,\n 'data': {\n 'key': pair_key,\n 'df': dataframe.tail(1),\n 'la': datetime.now(timezone.utc)\n }\n }\n self.__rpc.send_msg(msg)\n if new_candle:\n self.__rpc.send_msg({\n 'type': RPCMessageType.NEW_CANDLE,\n 'data': pair_key,\n })\n\n def _replace_external_df(\n self,\n pair: str,\n dataframe: DataFrame,\n last_analyzed: datetime,\n timeframe: str,\n candle_type: CandleType,\n producer_name: str = \"default\"\n ) -> None:\n \"\"\"\n Add the pair data to this class from an external source.\n\n :param pair: pair to get the data for\n :param timeframe: Timeframe to get data for\n :param candle_type: Any of the enum CandleType (must match trading mode!)\n \"\"\"\n pair_key = (pair, timeframe, candle_type)\n\n if producer_name not in self.__producer_pairs_df:\n self.__producer_pairs_df[producer_name] = {}\n\n _last_analyzed = datetime.now(timezone.utc) if not last_analyzed else last_analyzed\n\n self.__producer_pairs_df[producer_name][pair_key] = (dataframe, _last_analyzed)\n logger.debug(f\"External DataFrame for {pair_key} from {producer_name} added.\")\n\n def _add_external_df(\n self,\n pair: str,\n dataframe: DataFrame,\n last_analyzed: datetime,\n timeframe: str,\n candle_type: CandleType,\n producer_name: str = \"default\"\n ) -> Tuple[bool, int]:\n \"\"\"\n Append a candle to the existing external dataframe. 
The incoming dataframe\n must have at least 1 candle.\n\n :param pair: pair to get the data for\n :param timeframe: Timeframe to get data for\n :param candle_type: Any of the enum CandleType (must match trading mode!)\n :returns: False if the candle could not be appended, or the int number of missing candles.\n \"\"\"\n pair_key = (pair, timeframe, candle_type)\n\n if dataframe.empty:\n # The incoming dataframe must have at least 1 candle\n return (False, 0)\n\n if len(dataframe) >= FULL_DATAFRAME_THRESHOLD:\n # This is likely a full dataframe\n # Add the dataframe to the dataprovider\n self._replace_external_df(\n pair,\n dataframe,\n last_analyzed=last_analyzed,\n timeframe=timeframe,\n candle_type=candle_type,\n producer_name=producer_name\n )\n return (True, 0)\n\n if (producer_name not in self.__producer_pairs_df\n or pair_key not in self.__producer_pairs_df[producer_name]):\n # We don't have data from this producer yet,\n # or we don't have data for this pair_key\n # return False and 1000 for the full df\n return (False, 1000)\n\n existing_df, _ = self.__producer_pairs_df[producer_name][pair_key]\n\n # CHECK FOR MISSING CANDLES\n # Convert the timeframe to a timedelta for pandas\n timeframe_delta: Timedelta = to_timedelta(timeframe)\n local_last: Timestamp = existing_df.iloc[-1]['date'] # We want the last date from our copy\n # We want the first date from the incoming\n incoming_first: Timestamp = dataframe.iloc[0]['date']\n\n # Remove existing candles that are newer than the incoming first candle\n existing_df1 = existing_df[existing_df['date'] < incoming_first]\n\n candle_difference = (incoming_first - local_last) / timeframe_delta\n\n # If the difference divided by the timeframe is 1, then this\n # is the candle we want and the incoming data isn't missing any.\n # If the candle_difference is more than 1, that means\n # we missed some candles between our data and the incoming\n # so return False and candle_difference.\n if candle_difference > 1:\n return (False, int(candle_difference))\n if existing_df1.empty:\n appended_df = dataframe\n else:\n appended_df = append_candles_to_dataframe(existing_df1, dataframe)\n\n # Everything is good, we appended\n self._replace_external_df(\n pair,\n appended_df,\n last_analyzed=last_analyzed,\n timeframe=timeframe,\n candle_type=candle_type,\n producer_name=producer_name\n )\n return (True, 0)\n\n def get_producer_df(\n self,\n pair: str,\n timeframe: Optional[str] = None,\n candle_type: Optional[CandleType] = None,\n producer_name: str = \"default\"\n ) -> Tuple[DataFrame, datetime]:\n \"\"\"\n Get the pair data from producers.\n\n :param pair: pair to get the data for\n :param timeframe: Timeframe to get data for\n :param candle_type: Any of the enum CandleType (must match trading mode!)\n :returns: Tuple of the DataFrame and last analyzed timestamp\n \"\"\"\n _timeframe = self._default_timeframe if not timeframe else timeframe\n _candle_type = self._default_candle_type if not candle_type else candle_type\n\n pair_key = (pair, _timeframe, _candle_type)\n\n # If we have no data from this Producer yet\n if producer_name not in self.__producer_pairs_df:\n # We don't have this data yet, return empty DataFrame and datetime (01-01-1970)\n return (DataFrame(), datetime.fromtimestamp(0, tz=timezone.utc))\n\n # If we do have data from that Producer, but no data on this pair_key\n if pair_key not in self.__producer_pairs_df[producer_name]:\n # We don't have this data yet, return empty DataFrame and datetime (01-01-1970)\n return (DataFrame(), 
datetime.fromtimestamp(0, tz=timezone.utc))\n\n # We have it, return this data\n df, la = self.__producer_pairs_df[producer_name][pair_key]\n return (df.copy(), la)\n\n def add_pairlisthandler(self, pairlists) -> None:\n \"\"\"\n Allow adding pairlisthandler after initialization\n \"\"\"\n self._pairlists = pairlists\n\n def historic_ohlcv(\n self,\n pair: str,\n timeframe: str,\n candle_type: str = ''\n ) -> DataFrame:\n \"\"\"\n Get stored historical candle (OHLCV) data\n :param pair: pair to get the data for\n :param timeframe: timeframe to get data for\n :param candle_type: '', mark, index, premiumIndex, or funding_rate\n \"\"\"\n _candle_type = CandleType.from_string(\n candle_type) if candle_type != '' else self._config['candle_type_def']\n saved_pair: PairWithTimeframe = (pair, str(timeframe), _candle_type)\n if saved_pair not in self.__cached_pairs_backtesting:\n timerange = TimeRange.parse_timerange(None if self._config.get(\n 'timerange') is None else str(self._config.get('timerange')))\n\n # It is not necessary to add the training candles, as they\n # were already added at the beginning of the backtest.\n startup_candles = self.get_required_startup(str(timeframe), False)\n tf_seconds = timeframe_to_seconds(str(timeframe))\n timerange.subtract_start(tf_seconds * startup_candles)\n self.__cached_pairs_backtesting[saved_pair] = load_pair_history(\n pair=pair,\n timeframe=timeframe,\n datadir=self._config['datadir'],\n timerange=timerange,\n data_format=self._config['dataformat_ohlcv'],\n candle_type=_candle_type,\n\n )\n return self.__cached_pairs_backtesting[saved_pair].copy()\n\n def get_required_startup(self, timeframe: str, add_train_candles: bool = True) -> int:\n freqai_config = self._config.get('freqai', {})\n if not freqai_config.get('enabled', False):\n return self._config.get('startup_candle_count', 0)\n else:\n startup_candles = self._config.get('startup_candle_count', 0)\n indicator_periods = freqai_config['feature_parameters']['indicator_periods_candles']\n # make sure the startupcandles is at least the set maximum indicator periods\n self._config['startup_candle_count'] = max(startup_candles, max(indicator_periods))\n tf_seconds = timeframe_to_seconds(timeframe)\n train_candles = 0\n if add_train_candles:\n train_candles = freqai_config['train_period_days'] * 86400 / tf_seconds\n total_candles = int(self._config['startup_candle_count'] + train_candles)\n logger.info(f'Increasing startup_candle_count for freqai to {total_candles}')\n return total_candles\n\n def get_pair_dataframe(\n self,\n pair: str,\n timeframe: Optional[str] = None,\n candle_type: str = ''\n ) -> DataFrame:\n \"\"\"\n Return pair candle (OHLCV) data, either live or cached historical -- depending\n on the runmode.\n Only combinations in the pairlist or which have been specified as informative pairs\n will be available.\n :param pair: pair to get the data for\n :param timeframe: timeframe to get data for\n :return: Dataframe for this pair\n :param candle_type: '', mark, index, premiumIndex, or funding_rate\n \"\"\"\n if self.runmode in (RunMode.DRY_RUN, RunMode.LIVE):\n # Get live OHLCV data.\n data = self.ohlcv(pair=pair, timeframe=timeframe, candle_type=candle_type)\n else:\n # Get historical OHLCV data (cached on disk).\n timeframe = timeframe or self._config['timeframe']\n data = self.historic_ohlcv(pair=pair, timeframe=timeframe, candle_type=candle_type)\n # Cut date to timeframe-specific date.\n # This is necessary to prevent lookahead bias in callbacks through informative pairs.\n if 
self.__slice_date:\n cutoff_date = timeframe_to_prev_date(timeframe, self.__slice_date)\n data = data.loc[data['date'] < cutoff_date]\n if len(data) == 0:\n logger.warning(f\"No data found for ({pair}, {timeframe}, {candle_type}).\")\n return data\n\n def get_analyzed_dataframe(self, pair: str, timeframe: str) -> Tuple[DataFrame, datetime]:\n \"\"\"\n Retrieve the analyzed dataframe. Returns the full dataframe in trade mode (live / dry),\n and the last 1000 candles (up to the time evaluated at this moment) in all other modes.\n :param pair: pair to get the data for\n :param timeframe: timeframe to get data for\n :return: Tuple of (Analyzed Dataframe, lastrefreshed) for the requested pair / timeframe\n combination.\n Returns empty dataframe and Epoch 0 (1970-01-01) if no dataframe was cached.\n \"\"\"\n pair_key = (pair, timeframe, self._config.get('candle_type_def', CandleType.SPOT))\n if pair_key in self.__cached_pairs:\n if self.runmode in (RunMode.DRY_RUN, RunMode.LIVE):\n df, date = self.__cached_pairs[pair_key]\n else:\n df, date = self.__cached_pairs[pair_key]\n if self.__slice_index is not None:\n max_index = self.__slice_index\n df = df.iloc[max(0, max_index - MAX_DATAFRAME_CANDLES):max_index]\n return df, date\n else:\n return (DataFrame(), datetime.fromtimestamp(0, tz=timezone.utc))\n\n @property\n def runmode(self) -> RunMode:\n \"\"\"\n Get runmode of the bot\n can be \"live\", \"dry-run\", \"backtest\", \"edgecli\", \"hyperopt\" or \"other\".\n \"\"\"\n return RunMode(self._config.get('runmode', RunMode.OTHER))\n\n def current_whitelist(self) -> List[str]:\n \"\"\"\n fetch latest available whitelist.\n\n Useful when you have a large whitelist and need to call each pair as an informative pair.\n As available pairs does not show whitelist until after informative pairs have been cached.\n :return: list of pairs in whitelist\n \"\"\"\n\n if self._pairlists:\n return self._pairlists.whitelist.copy()\n else:\n raise OperationalException(\"Dataprovider was not initialized with a pairlist provider.\")\n\n def clear_cache(self):\n \"\"\"\n Clear pair dataframe cache.\n \"\"\"\n self.__cached_pairs = {}\n # Don't reset backtesting pairs -\n # otherwise they're reloaded each time during hyperopt due to with analyze_per_epoch\n # self.__cached_pairs_backtesting = {}\n self.__slice_index = 0\n\n # Exchange functions\n\n def refresh(self,\n pairlist: ListPairsWithTimeframes,\n helping_pairs: Optional[ListPairsWithTimeframes] = None) -> None:\n \"\"\"\n Refresh data, called with each cycle\n \"\"\"\n if self._exchange is None:\n raise OperationalException(NO_EXCHANGE_EXCEPTION)\n final_pairs = (pairlist + helping_pairs) if helping_pairs else pairlist\n self._exchange.refresh_latest_ohlcv(final_pairs)\n\n @property\n def available_pairs(self) -> ListPairsWithTimeframes:\n \"\"\"\n Return a list of tuples containing (pair, timeframe) for which data is currently cached.\n Should be whitelist + open trades.\n \"\"\"\n if self._exchange is None:\n raise OperationalException(NO_EXCHANGE_EXCEPTION)\n return list(self._exchange._klines.keys())\n\n def ohlcv(\n self,\n pair: str,\n timeframe: Optional[str] = None,\n copy: bool = True,\n candle_type: str = ''\n ) -> DataFrame:\n \"\"\"\n Get candle (OHLCV) data for the given pair as DataFrame\n Please use the `available_pairs` method to verify which pairs are currently cached.\n :param pair: pair to get the data for\n :param timeframe: Timeframe to get data for\n :param candle_type: '', mark, index, premiumIndex, or funding_rate\n :param copy: copy 
dataframe before returning if True.\n Use False only for read-only operations (where the dataframe is not modified)\n \"\"\"\n if self._exchange is None:\n raise OperationalException(NO_EXCHANGE_EXCEPTION)\n if self.runmode in (RunMode.DRY_RUN, RunMode.LIVE):\n _candle_type = CandleType.from_string(\n candle_type) if candle_type != '' else self._config['candle_type_def']\n return self._exchange.klines(\n (pair, timeframe or self._config['timeframe'], _candle_type),\n copy=copy\n )\n else:\n return DataFrame()\n\n def market(self, pair: str) -> Optional[Dict[str, Any]]:\n \"\"\"\n Return market data for the pair\n :param pair: Pair to get the data for\n :return: Market data dict from ccxt or None if market info is not available for the pair\n \"\"\"\n if self._exchange is None:\n raise OperationalException(NO_EXCHANGE_EXCEPTION)\n return self._exchange.markets.get(pair)\n\n def ticker(self, pair: str):\n \"\"\"\n Return last ticker data from exchange\n :param pair: Pair to get the data for\n :return: Ticker dict from exchange or empty dict if ticker is not available for the pair\n \"\"\"\n if self._exchange is None:\n raise OperationalException(NO_EXCHANGE_EXCEPTION)\n try:\n return self._exchange.fetch_ticker(pair)\n except ExchangeError:\n return {}\n\n def orderbook(self, pair: str, maximum: int) -> OrderBook:\n \"\"\"\n Fetch latest l2 orderbook data\n Warning: Does a network request - so use with common sense.\n :param pair: pair to get the data for\n :param maximum: Maximum number of orderbook entries to query\n :return: dict including bids/asks with a total of `maximum` entries.\n \"\"\"\n if self._exchange is None:\n raise OperationalException(NO_EXCHANGE_EXCEPTION)\n return self._exchange.fetch_l2_order_book(pair, maximum)\n\n def send_msg(self, message: str, *, always_send: bool = False) -> None:\n \"\"\"\n Send custom RPC Notifications from your bot.\n Will not send any bot in modes other than Dry-run or Live.\n :param message: Message to be sent. Must be below 4096.\n :param always_send: If False, will send the message only once per candle, and surpress\n identical messages.\n Careful as this can end up spaming your chat.\n Defaults to False\n \"\"\"\n if self.runmode not in (RunMode.DRY_RUN, RunMode.LIVE):\n return\n\n if always_send or message not in self.__msg_cache:\n self._msg_queue.append(message)\n self.__msg_cache[message] = True" }, { "identifier": "OperationalException", "path": "freqtrade/exceptions.py", "snippet": "class OperationalException(FreqtradeException):\n \"\"\"\n Requires manual intervention and will stop the bot.\n Most of the time, this is caused by an invalid Configuration.\n \"\"\"" }, { "identifier": "FreqaiDataKitchen", "path": "freqtrade/freqai/data_kitchen.py", "snippet": "class FreqaiDataKitchen:\n \"\"\"\n Class designed to analyze data for a single pair. 
Employed by the IFreqaiModel class.\n Functionalities include holding, saving, loading, and analyzing the data.\n\n This object is not persistent, it is reinstantiated for each coin, each time the coin\n model needs to be inferenced or trained.\n\n Record of contribution:\n FreqAI was developed by a group of individuals who all contributed specific skillsets to the\n project.\n\n Conception and software development:\n Robert Caulk @robcaulk\n\n Theoretical brainstorming:\n Elin Törnquist @th0rntwig\n\n Code review, software architecture brainstorming:\n @xmatthias\n\n Beta testing and bug reporting:\n @bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm\n Juha Nykänen @suikula, Wagner Costa @wagnercosta, Johan Vlugt @Jooopieeert\n \"\"\"\n\n def __init__(\n self,\n config: Config,\n live: bool = False,\n pair: str = \"\",\n ):\n self.data: Dict[str, Any] = {}\n self.data_dictionary: Dict[str, DataFrame] = {}\n self.config = config\n self.freqai_config: Dict[str, Any] = config[\"freqai\"]\n self.full_df: DataFrame = DataFrame()\n self.append_df: DataFrame = DataFrame()\n self.data_path = Path()\n self.label_list: List = []\n self.training_features_list: List = []\n self.model_filename: str = \"\"\n self.backtesting_results_path = Path()\n self.backtest_predictions_folder: str = \"backtesting_predictions\"\n self.live = live\n self.pair = pair\n self.keras: bool = self.freqai_config.get(\"keras\", False)\n self.set_all_pairs()\n self.backtest_live_models = config.get(\"freqai_backtest_live_models\", False)\n self.feature_pipeline = Pipeline()\n self.label_pipeline = Pipeline()\n self.DI_values: npt.NDArray = np.array([])\n\n if not self.live:\n self.full_path = self.get_full_models_path(self.config)\n\n if not self.backtest_live_models:\n self.full_timerange = self.create_fulltimerange(\n self.config[\"timerange\"], self.freqai_config.get(\"train_period_days\", 0)\n )\n (self.training_timeranges, self.backtesting_timeranges) = self.split_timerange(\n self.full_timerange,\n config[\"freqai\"][\"train_period_days\"],\n config[\"freqai\"][\"backtest_period_days\"],\n )\n\n self.data['extra_returns_per_train'] = self.freqai_config.get('extra_returns_per_train', {})\n if not self.freqai_config.get(\"data_kitchen_thread_count\", 0):\n self.thread_count = max(int(psutil.cpu_count() * 2 - 2), 1)\n else:\n self.thread_count = self.freqai_config[\"data_kitchen_thread_count\"]\n self.train_dates: DataFrame = pd.DataFrame()\n self.unique_classes: Dict[str, list] = {}\n self.unique_class_list: list = []\n self.backtest_live_models_data: Dict[str, Any] = {}\n\n def set_paths(\n self,\n pair: str,\n trained_timestamp: Optional[int] = None,\n ) -> None:\n \"\"\"\n Set the paths to the data for the present coin/botloop\n :param metadata: dict = strategy furnished pair metadata\n :param trained_timestamp: int = timestamp of most recent training\n \"\"\"\n self.full_path = self.get_full_models_path(self.config)\n self.data_path = Path(\n self.full_path\n / f\"sub-train-{pair.split('/')[0]}_{trained_timestamp}\"\n )\n\n return\n\n def make_train_test_datasets(\n self, filtered_dataframe: DataFrame, labels: DataFrame\n ) -> Dict[Any, Any]:\n \"\"\"\n Given the dataframe for the full history for training, split the data into\n training and test data according to user specified parameters in configuration\n file.\n :param filtered_dataframe: cleaned dataframe ready to be split.\n :param labels: cleaned labels ready to be split.\n \"\"\"\n feat_dict = 
self.freqai_config[\"feature_parameters\"]\n\n if 'shuffle' not in self.freqai_config['data_split_parameters']:\n self.freqai_config[\"data_split_parameters\"].update({'shuffle': False})\n\n weights: npt.ArrayLike\n if feat_dict.get(\"weight_factor\", 0) > 0:\n weights = self.set_weights_higher_recent(len(filtered_dataframe))\n else:\n weights = np.ones(len(filtered_dataframe))\n\n if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0:\n (\n train_features,\n test_features,\n train_labels,\n test_labels,\n train_weights,\n test_weights,\n ) = train_test_split(\n filtered_dataframe[: filtered_dataframe.shape[0]],\n labels,\n weights,\n **self.config[\"freqai\"][\"data_split_parameters\"],\n )\n else:\n test_labels = np.zeros(2)\n test_features = pd.DataFrame()\n test_weights = np.zeros(2)\n train_features = filtered_dataframe\n train_labels = labels\n train_weights = weights\n\n if feat_dict[\"shuffle_after_split\"]:\n rint1 = random.randint(0, 100)\n rint2 = random.randint(0, 100)\n train_features = train_features.sample(\n frac=1, random_state=rint1).reset_index(drop=True)\n train_labels = train_labels.sample(frac=1, random_state=rint1).reset_index(drop=True)\n train_weights = pd.DataFrame(train_weights).sample(\n frac=1, random_state=rint1).reset_index(drop=True).to_numpy()[:, 0]\n test_features = test_features.sample(frac=1, random_state=rint2).reset_index(drop=True)\n test_labels = test_labels.sample(frac=1, random_state=rint2).reset_index(drop=True)\n test_weights = pd.DataFrame(test_weights).sample(\n frac=1, random_state=rint2).reset_index(drop=True).to_numpy()[:, 0]\n\n # Simplest way to reverse the order of training and test data:\n if self.freqai_config['feature_parameters'].get('reverse_train_test_order', False):\n return self.build_data_dictionary(\n test_features, train_features, test_labels,\n train_labels, test_weights, train_weights\n )\n else:\n return self.build_data_dictionary(\n train_features, test_features, train_labels,\n test_labels, train_weights, test_weights\n )\n\n def filter_features(\n self,\n unfiltered_df: DataFrame,\n training_feature_list: List,\n label_list: List = list(),\n training_filter: bool = True,\n ) -> Tuple[DataFrame, DataFrame]:\n \"\"\"\n Filter the unfiltered dataframe to extract the user requested features/labels and properly\n remove all NaNs. Any row with a NaN is removed from training dataset or replaced with\n 0s in the prediction dataset. However, prediction dataset do_predict will reflect any\n row that had a NaN and will shield user from that prediction.\n\n :param unfiltered_df: the full dataframe for the present training period\n :param training_feature_list: list, the training feature list constructed by\n self.build_feature_list() according to user specified\n parameters in the configuration file.\n :param labels: the labels for the dataset\n :param training_filter: boolean which lets the function know if it is training data or\n prediction data to be filtered.\n :returns:\n :filtered_df: dataframe cleaned of NaNs and only containing the user\n requested feature set.\n :labels: labels cleaned of NaNs.\n \"\"\"\n filtered_df = unfiltered_df.filter(training_feature_list, axis=1)\n filtered_df = filtered_df.replace([np.inf, -np.inf], np.nan)\n\n drop_index = pd.isnull(filtered_df).any(axis=1) # get the rows that have NaNs,\n drop_index = drop_index.replace(True, 1).replace(False, 0) # pep8 requirement.\n if (training_filter):\n\n # we don't care about total row number (total no. 
datapoints) in training, we only care\n # about removing any row with NaNs\n # if labels has multiple columns (user wants to train multiple modelEs), we detect here\n labels = unfiltered_df.filter(label_list, axis=1)\n drop_index_labels = pd.isnull(labels).any(axis=1)\n drop_index_labels = drop_index_labels.replace(True, 1).replace(False, 0)\n dates = unfiltered_df['date']\n filtered_df = filtered_df[\n (drop_index == 0) & (drop_index_labels == 0)\n ] # dropping values\n labels = labels[\n (drop_index == 0) & (drop_index_labels == 0)\n ] # assuming the labels depend entirely on the dataframe here.\n self.train_dates = dates[\n (drop_index == 0) & (drop_index_labels == 0)\n ]\n logger.info(\n f\"{self.pair}: dropped {len(unfiltered_df) - len(filtered_df)} training points\"\n f\" due to NaNs in populated dataset {len(unfiltered_df)}.\"\n )\n if len(unfiltered_df) == 0 and not self.live:\n raise OperationalException(\n f\"{self.pair}: all training data dropped due to NaNs. \"\n \"You likely did not download enough training data prior \"\n \"to your backtest timerange. Hint:\\n\"\n f\"{DOCS_LINK}/freqai-running/\"\n \"#downloading-data-to-cover-the-full-backtest-period\"\n )\n if (1 - len(filtered_df) / len(unfiltered_df)) > 0.1 and self.live:\n worst_indicator = str(unfiltered_df.count().idxmin())\n logger.warning(\n f\" {(1 - len(filtered_df)/len(unfiltered_df)) * 100:.0f} percent \"\n \" of training data dropped due to NaNs, model may perform inconsistent \"\n f\"with expectations. Verify {worst_indicator}\"\n )\n self.data[\"filter_drop_index_training\"] = drop_index\n\n else:\n\n # we are backtesting so we need to preserve row number to send back to strategy,\n # so now we use do_predict to avoid any prediction based on a NaN\n drop_index = pd.isnull(filtered_df).any(axis=1)\n self.data[\"filter_drop_index_prediction\"] = drop_index\n filtered_df.fillna(0, inplace=True)\n # replacing all NaNs with zeros to avoid issues in 'prediction', but any prediction\n # that was based on a single NaN is ultimately protected from buys with do_predict\n drop_index = ~drop_index\n self.do_predict = np.array(drop_index.replace(True, 1).replace(False, 0))\n if (len(self.do_predict) - self.do_predict.sum()) > 0:\n logger.info(\n \"dropped %s of %s prediction data points due to NaNs.\",\n len(self.do_predict) - self.do_predict.sum(),\n len(filtered_df),\n )\n labels = []\n\n return filtered_df, labels\n\n def build_data_dictionary(\n self,\n train_df: DataFrame,\n test_df: DataFrame,\n train_labels: DataFrame,\n test_labels: DataFrame,\n train_weights: Any,\n test_weights: Any,\n ) -> Dict:\n\n self.data_dictionary = {\n \"train_features\": train_df,\n \"test_features\": test_df,\n \"train_labels\": train_labels,\n \"test_labels\": test_labels,\n \"train_weights\": train_weights,\n \"test_weights\": test_weights,\n \"train_dates\": self.train_dates\n }\n\n return self.data_dictionary\n\n def split_timerange(\n self, tr: str, train_split: int = 28, bt_split: float = 7\n ) -> Tuple[list, list]:\n \"\"\"\n Function which takes a single time range (tr) and splits it\n into sub timeranges to train and backtest on based on user input\n tr: str, full timerange to train on\n train_split: the period length for the each training (days). Specified in user\n configuration file\n bt_split: the backtesting length (days). Specified in user configuration file\n \"\"\"\n\n if not isinstance(train_split, int) or train_split < 1:\n raise OperationalException(\n f\"train_period_days must be an integer greater than 0. 
Got {train_split}.\"\n )\n train_period_days = train_split * SECONDS_IN_DAY\n bt_period = bt_split * SECONDS_IN_DAY\n\n full_timerange = TimeRange.parse_timerange(tr)\n config_timerange = TimeRange.parse_timerange(self.config[\"timerange\"])\n if config_timerange.stopts == 0:\n config_timerange.stopts = int(\n datetime.now(tz=timezone.utc).timestamp()\n )\n timerange_train = copy.deepcopy(full_timerange)\n timerange_backtest = copy.deepcopy(full_timerange)\n\n tr_training_list = []\n tr_backtesting_list = []\n tr_training_list_timerange = []\n tr_backtesting_list_timerange = []\n first = True\n\n while True:\n if not first:\n timerange_train.startts = timerange_train.startts + int(bt_period)\n timerange_train.stopts = timerange_train.startts + train_period_days\n\n first = False\n tr_training_list.append(timerange_train.timerange_str)\n tr_training_list_timerange.append(copy.deepcopy(timerange_train))\n\n # associated backtest period\n timerange_backtest.startts = timerange_train.stopts\n timerange_backtest.stopts = timerange_backtest.startts + int(bt_period)\n\n if timerange_backtest.stopts > config_timerange.stopts:\n timerange_backtest.stopts = config_timerange.stopts\n\n tr_backtesting_list.append(timerange_backtest.timerange_str)\n tr_backtesting_list_timerange.append(copy.deepcopy(timerange_backtest))\n\n # ensure we are predicting on exactly same amount of data as requested by user defined\n # --timerange\n if timerange_backtest.stopts == config_timerange.stopts:\n break\n\n # print(tr_training_list, tr_backtesting_list)\n return tr_training_list_timerange, tr_backtesting_list_timerange\n\n def slice_dataframe(self, timerange: TimeRange, df: DataFrame) -> DataFrame:\n \"\"\"\n Given a full dataframe, extract the user desired window\n :param tr: timerange string that we wish to extract from df\n :param df: Dataframe containing all candles to run the entire backtest. 
Here\n it is sliced down to just the present training period.\n \"\"\"\n if not self.live:\n df = df.loc[(df[\"date\"] >= timerange.startdt) & (df[\"date\"] < timerange.stopdt), :]\n else:\n df = df.loc[df[\"date\"] >= timerange.startdt, :]\n\n return df\n\n def find_features(self, dataframe: DataFrame) -> None:\n \"\"\"\n Find features in the strategy provided dataframe\n :param dataframe: DataFrame = strategy provided dataframe\n :return:\n features: list = the features to be used for training/prediction\n \"\"\"\n column_names = dataframe.columns\n features = [c for c in column_names if \"%\" in c]\n\n if not features:\n raise OperationalException(\"Could not find any features!\")\n\n self.training_features_list = features\n\n def find_labels(self, dataframe: DataFrame) -> None:\n column_names = dataframe.columns\n labels = [c for c in column_names if \"&\" in c]\n self.label_list = labels\n\n def set_weights_higher_recent(self, num_weights: int) -> npt.ArrayLike:\n \"\"\"\n Set weights so that recent data is more heavily weighted during\n training than older data.\n \"\"\"\n wfactor = self.config[\"freqai\"][\"feature_parameters\"][\"weight_factor\"]\n weights = np.exp(-np.arange(num_weights) / (wfactor * num_weights))[::-1]\n return weights\n\n def get_predictions_to_append(self, predictions: DataFrame,\n do_predict: npt.ArrayLike,\n dataframe_backtest: DataFrame) -> DataFrame:\n \"\"\"\n Get backtest prediction from current backtest period\n \"\"\"\n\n append_df = DataFrame()\n for label in predictions.columns:\n append_df[label] = predictions[label]\n if append_df[label].dtype == object:\n continue\n if \"labels_mean\" in self.data:\n append_df[f\"{label}_mean\"] = self.data[\"labels_mean\"][label]\n if \"labels_std\" in self.data:\n append_df[f\"{label}_std\"] = self.data[\"labels_std\"][label]\n\n for extra_col in self.data[\"extra_returns_per_train\"]:\n append_df[f\"{extra_col}\"] = self.data[\"extra_returns_per_train\"][extra_col]\n\n append_df[\"do_predict\"] = do_predict\n if self.freqai_config[\"feature_parameters\"].get(\"DI_threshold\", 0) > 0:\n append_df[\"DI_values\"] = self.DI_values\n\n dataframe_backtest.reset_index(drop=True, inplace=True)\n merged_df = pd.concat([dataframe_backtest[\"date\"], append_df], axis=1)\n return merged_df\n\n def append_predictions(self, append_df: DataFrame) -> None:\n \"\"\"\n Append backtest prediction from current backtest period to all previous periods\n \"\"\"\n\n if self.full_df.empty:\n self.full_df = append_df\n else:\n self.full_df = pd.concat([self.full_df, append_df], axis=0, ignore_index=True)\n\n def fill_predictions(self, dataframe):\n \"\"\"\n Back fill values to before the backtesting range so that the dataframe matches size\n when it goes back to the strategy. 
These rows are not included in the backtest.\n \"\"\"\n to_keep = [col for col in dataframe.columns if not col.startswith(\"&\")]\n self.return_dataframe = pd.merge(dataframe[to_keep],\n self.full_df, how='left', on='date')\n self.return_dataframe[self.full_df.columns] = (\n self.return_dataframe[self.full_df.columns].fillna(value=0))\n self.full_df = DataFrame()\n\n return\n\n def create_fulltimerange(self, backtest_tr: str, backtest_period_days: int) -> str:\n\n if not isinstance(backtest_period_days, int):\n raise OperationalException(\"backtest_period_days must be an integer\")\n\n if backtest_period_days < 0:\n raise OperationalException(\"backtest_period_days must be positive\")\n\n backtest_timerange = TimeRange.parse_timerange(backtest_tr)\n\n if backtest_timerange.stopts == 0:\n # typically open ended time ranges do work, however, there are some edge cases where\n # it does not. accommodating these kinds of edge cases just to allow open-ended\n # timerange is not high enough priority to warrant the effort. It is safer for now\n # to simply ask user to add their end date\n raise OperationalException(\"FreqAI backtesting does not allow open ended timeranges. \"\n \"Please indicate the end date of your desired backtesting. \"\n \"timerange.\")\n # backtest_timerange.stopts = int(\n # datetime.now(tz=timezone.utc).timestamp()\n # )\n\n backtest_timerange.startts = (\n backtest_timerange.startts - backtest_period_days * SECONDS_IN_DAY\n )\n full_timerange = backtest_timerange.timerange_str\n config_path = Path(self.config[\"config_files\"][0])\n\n if not self.full_path.is_dir():\n self.full_path.mkdir(parents=True, exist_ok=True)\n shutil.copy(\n config_path.resolve(),\n Path(self.full_path / config_path.parts[-1]),\n )\n\n return full_timerange\n\n def check_if_model_expired(self, trained_timestamp: int) -> bool:\n \"\"\"\n A model age checker to determine if the model is trustworthy based on user defined\n `expiration_hours` in the configuration file.\n :param trained_timestamp: int = The time of training for the most recent model.\n :return:\n bool = If the model is expired or not.\n \"\"\"\n time = datetime.now(tz=timezone.utc).timestamp()\n elapsed_time = (time - trained_timestamp) / 3600 # hours\n max_time = self.freqai_config.get(\"expiration_hours\", 0)\n if max_time > 0:\n return elapsed_time > max_time\n else:\n return False\n\n def check_if_new_training_required(\n self, trained_timestamp: int\n ) -> Tuple[bool, TimeRange, TimeRange]:\n\n time = datetime.now(tz=timezone.utc).timestamp()\n trained_timerange = TimeRange()\n data_load_timerange = TimeRange()\n\n timeframes = self.freqai_config[\"feature_parameters\"].get(\"include_timeframes\")\n\n max_tf_seconds = 0\n for tf in timeframes:\n secs = timeframe_to_seconds(tf)\n if secs > max_tf_seconds:\n max_tf_seconds = secs\n\n # We notice that users like to use exotic indicators where\n # they do not know the required timeperiod. 
Here we include a factor\n # of safety by multiplying the user considered \"max\" by 2.\n max_period = self.config.get('startup_candle_count', 20) * 2\n additional_seconds = max_period * max_tf_seconds\n\n if trained_timestamp != 0:\n elapsed_time = (time - trained_timestamp) / SECONDS_IN_HOUR\n retrain = elapsed_time > self.freqai_config.get(\"live_retrain_hours\", 0)\n if retrain:\n trained_timerange.startts = int(\n time - self.freqai_config.get(\"train_period_days\", 0) * SECONDS_IN_DAY\n )\n trained_timerange.stopts = int(time)\n # we want to load/populate indicators on more data than we plan to train on so\n # because most of the indicators have a rolling timeperiod, and are thus NaNs\n # unless they have data further back in time before the start of the train period\n data_load_timerange.startts = int(\n time\n - self.freqai_config.get(\"train_period_days\", 0) * SECONDS_IN_DAY\n - additional_seconds\n )\n data_load_timerange.stopts = int(time)\n else: # user passed no live_trained_timerange in config\n trained_timerange.startts = int(\n time - self.freqai_config.get(\"train_period_days\", 0) * SECONDS_IN_DAY\n )\n trained_timerange.stopts = int(time)\n\n data_load_timerange.startts = int(\n time\n - self.freqai_config.get(\"train_period_days\", 0) * SECONDS_IN_DAY\n - additional_seconds\n )\n data_load_timerange.stopts = int(time)\n retrain = True\n\n return retrain, trained_timerange, data_load_timerange\n\n def set_new_model_names(self, pair: str, timestamp_id: int):\n\n coin, _ = pair.split(\"/\")\n self.data_path = Path(\n self.full_path\n / f\"sub-train-{pair.split('/')[0]}_{timestamp_id}\"\n )\n\n self.model_filename = f\"cb_{coin.lower()}_{timestamp_id}\"\n\n def set_all_pairs(self) -> None:\n\n self.all_pairs = copy.deepcopy(\n self.freqai_config[\"feature_parameters\"].get(\"include_corr_pairlist\", [])\n )\n for pair in self.config.get(\"exchange\", \"\").get(\"pair_whitelist\"):\n if pair not in self.all_pairs:\n self.all_pairs.append(pair)\n\n def extract_corr_pair_columns_from_populated_indicators(\n self,\n dataframe: DataFrame\n ) -> Dict[str, DataFrame]:\n \"\"\"\n Find the columns of the dataframe corresponding to the corr_pairlist, save them\n in a dictionary to be reused and attached to other pairs.\n\n :param dataframe: fully populated dataframe (current pair + corr_pairs)\n :return: corr_dataframes, dictionary of dataframes to be attached\n to other pairs in same candle.\n \"\"\"\n corr_dataframes: Dict[str, DataFrame] = {}\n pairs = self.freqai_config[\"feature_parameters\"].get(\"include_corr_pairlist\", [])\n\n for pair in pairs:\n pair = pair.replace(':', '') # lightgbm doesnt like colons\n pair_cols = [col for col in dataframe.columns if col.startswith(\"%\")\n and f\"{pair}_\" in col]\n\n if pair_cols:\n pair_cols.insert(0, 'date')\n corr_dataframes[pair] = dataframe.filter(pair_cols, axis=1)\n\n return corr_dataframes\n\n def attach_corr_pair_columns(self, dataframe: DataFrame,\n corr_dataframes: Dict[str, DataFrame],\n current_pair: str) -> DataFrame:\n \"\"\"\n Attach the existing corr_pair dataframes to the current pair dataframe before training\n\n :param dataframe: current pair strategy dataframe, indicators populated already\n :param corr_dataframes: dictionary of saved dataframes from earlier in the same candle\n :param current_pair: current pair to which we will attach corr pair dataframe\n :return:\n :dataframe: current pair dataframe of populated indicators, concatenated with corr_pairs\n ready for training\n \"\"\"\n pairs = 
self.freqai_config[\"feature_parameters\"].get(\"include_corr_pairlist\", [])\n current_pair = current_pair.replace(':', '')\n for pair in pairs:\n pair = pair.replace(':', '') # lightgbm doesnt work with colons\n if current_pair != pair:\n dataframe = dataframe.merge(corr_dataframes[pair], how='left', on='date')\n\n return dataframe\n\n def get_pair_data_for_features(self,\n pair: str,\n tf: str,\n strategy: IStrategy,\n corr_dataframes: dict = {},\n base_dataframes: dict = {},\n is_corr_pairs: bool = False) -> DataFrame:\n \"\"\"\n Get the data for the pair. If it's not in the dictionary, get it from the data provider\n :param pair: str = pair to get data for\n :param tf: str = timeframe to get data for\n :param strategy: IStrategy = user defined strategy object\n :param corr_dataframes: dict = dict containing the df pair dataframes\n (for user defined timeframes)\n :param base_dataframes: dict = dict containing the current pair dataframes\n (for user defined timeframes)\n :param is_corr_pairs: bool = whether the pair is a corr pair or not\n :return: dataframe = dataframe containing the pair data\n \"\"\"\n if is_corr_pairs:\n dataframe = corr_dataframes[pair][tf]\n if not dataframe.empty:\n return dataframe\n else:\n dataframe = strategy.dp.get_pair_dataframe(pair=pair, timeframe=tf)\n return dataframe\n else:\n dataframe = base_dataframes[tf]\n if not dataframe.empty:\n return dataframe\n else:\n dataframe = strategy.dp.get_pair_dataframe(pair=pair, timeframe=tf)\n return dataframe\n\n def merge_features(self, df_main: DataFrame, df_to_merge: DataFrame,\n tf: str, timeframe_inf: str, suffix: str) -> DataFrame:\n \"\"\"\n Merge the features of the dataframe and remove HLCV and date added columns\n :param df_main: DataFrame = main dataframe\n :param df_to_merge: DataFrame = dataframe to merge\n :param tf: str = timeframe of the main dataframe\n :param timeframe_inf: str = timeframe of the dataframe to merge\n :param suffix: str = suffix to add to the columns of the dataframe to merge\n :return: dataframe = merged dataframe\n \"\"\"\n dataframe = merge_informative_pair(df_main, df_to_merge, tf, timeframe_inf=timeframe_inf,\n append_timeframe=False, suffix=suffix, ffill=True)\n skip_columns = [\n (f\"{s}_{suffix}\") for s in [\"date\", \"open\", \"high\", \"low\", \"close\", \"volume\"]\n ]\n dataframe = dataframe.drop(columns=skip_columns)\n return dataframe\n\n def populate_features(self, dataframe: DataFrame, pair: str, strategy: IStrategy,\n corr_dataframes: dict, base_dataframes: dict,\n is_corr_pairs: bool = False) -> DataFrame:\n \"\"\"\n Use the user defined strategy functions for populating features\n :param dataframe: DataFrame = dataframe to populate\n :param pair: str = pair to populate\n :param strategy: IStrategy = user defined strategy object\n :param corr_dataframes: dict = dict containing the df pair dataframes\n :param base_dataframes: dict = dict containing the current pair dataframes\n :param is_corr_pairs: bool = whether the pair is a corr pair or not\n :return: dataframe = populated dataframe\n \"\"\"\n tfs: List[str] = self.freqai_config[\"feature_parameters\"].get(\"include_timeframes\")\n\n for tf in tfs:\n metadata = {\"pair\": pair, \"tf\": tf}\n informative_df = self.get_pair_data_for_features(\n pair, tf, strategy, corr_dataframes, base_dataframes, is_corr_pairs)\n informative_copy = informative_df.copy()\n\n for t in self.freqai_config[\"feature_parameters\"][\"indicator_periods_candles\"]:\n df_features = strategy.feature_engineering_expand_all(\n 
informative_copy.copy(), t, metadata=metadata)\n suffix = f\"{t}\"\n informative_df = self.merge_features(informative_df, df_features, tf, tf, suffix)\n\n generic_df = strategy.feature_engineering_expand_basic(\n informative_copy.copy(), metadata=metadata)\n suffix = \"gen\"\n\n informative_df = self.merge_features(informative_df, generic_df, tf, tf, suffix)\n\n indicators = [col for col in informative_df if col.startswith(\"%\")]\n for n in range(self.freqai_config[\"feature_parameters\"][\"include_shifted_candles\"] + 1):\n if n == 0:\n continue\n df_shift = informative_df[indicators].shift(n)\n df_shift = df_shift.add_suffix(\"_shift-\" + str(n))\n informative_df = pd.concat((informative_df, df_shift), axis=1)\n\n dataframe = self.merge_features(dataframe.copy(), informative_df,\n self.config[\"timeframe\"], tf, f'{pair}_{tf}')\n\n return dataframe\n\n def use_strategy_to_populate_indicators( # noqa: C901\n self,\n strategy: IStrategy,\n corr_dataframes: dict = {},\n base_dataframes: dict = {},\n pair: str = \"\",\n prediction_dataframe: DataFrame = pd.DataFrame(),\n do_corr_pairs: bool = True,\n ) -> DataFrame:\n \"\"\"\n Use the user defined strategy for populating indicators during retrain\n :param strategy: IStrategy = user defined strategy object\n :param corr_dataframes: dict = dict containing the df pair dataframes\n (for user defined timeframes)\n :param base_dataframes: dict = dict containing the current pair dataframes\n (for user defined timeframes)\n :param pair: str = pair to populate\n :param prediction_dataframe: DataFrame = dataframe containing the pair data\n used for prediction\n :param do_corr_pairs: bool = whether to populate corr pairs or not\n :return:\n dataframe: DataFrame = dataframe containing populated indicators\n \"\"\"\n\n # check if the user is using the deprecated populate_any_indicators function\n new_version = inspect.getsource(strategy.populate_any_indicators) == (\n inspect.getsource(IStrategy.populate_any_indicators))\n\n if not new_version:\n raise OperationalException(\n \"You are using the `populate_any_indicators()` function\"\n \" which was deprecated on March 1, 2023. 
Please refer \"\n \"to the strategy migration guide to use the new \"\n \"feature_engineering_* methods: \\n\"\n f\"{DOCS_LINK}/strategy_migration/#freqai-strategy \\n\"\n \"And the feature_engineering_* documentation: \\n\"\n f\"{DOCS_LINK}/freqai-feature-engineering/\"\n )\n\n tfs: List[str] = self.freqai_config[\"feature_parameters\"].get(\"include_timeframes\")\n pairs: List[str] = self.freqai_config[\"feature_parameters\"].get(\n \"include_corr_pairlist\", [])\n\n for tf in tfs:\n if tf not in base_dataframes:\n base_dataframes[tf] = pd.DataFrame()\n for p in pairs:\n if p not in corr_dataframes:\n corr_dataframes[p] = {}\n if tf not in corr_dataframes[p]:\n corr_dataframes[p][tf] = pd.DataFrame()\n\n if not prediction_dataframe.empty:\n dataframe = prediction_dataframe.copy()\n else:\n dataframe = base_dataframes[self.config[\"timeframe\"]].copy()\n\n corr_pairs: List[str] = self.freqai_config[\"feature_parameters\"].get(\n \"include_corr_pairlist\", [])\n dataframe = self.populate_features(dataframe.copy(), pair, strategy,\n corr_dataframes, base_dataframes)\n metadata = {\"pair\": pair}\n dataframe = strategy.feature_engineering_standard(dataframe.copy(), metadata=metadata)\n # ensure corr pairs are always last\n for corr_pair in corr_pairs:\n if pair == corr_pair:\n continue # dont repeat anything from whitelist\n if corr_pairs and do_corr_pairs:\n dataframe = self.populate_features(dataframe.copy(), corr_pair, strategy,\n corr_dataframes, base_dataframes, True)\n\n if self.live:\n dataframe = strategy.set_freqai_targets(dataframe.copy(), metadata=metadata)\n dataframe = self.remove_special_chars_from_feature_names(dataframe)\n\n self.get_unique_classes_from_labels(dataframe)\n\n if self.config.get('reduce_df_footprint', False):\n dataframe = reduce_dataframe_footprint(dataframe)\n\n return dataframe\n\n def fit_labels(self) -> None:\n \"\"\"\n Fit the labels with a gaussian distribution\n \"\"\"\n import scipy as spy\n\n self.data[\"labels_mean\"], self.data[\"labels_std\"] = {}, {}\n for label in self.data_dictionary[\"train_labels\"].columns:\n if self.data_dictionary[\"train_labels\"][label].dtype == object:\n continue\n f = spy.stats.norm.fit(self.data_dictionary[\"train_labels\"][label])\n self.data[\"labels_mean\"][label], self.data[\"labels_std\"][label] = f[0], f[1]\n\n # incase targets are classifications\n for label in self.unique_class_list:\n self.data[\"labels_mean\"][label], self.data[\"labels_std\"][label] = 0, 0\n\n return\n\n def remove_features_from_df(self, dataframe: DataFrame) -> DataFrame:\n \"\"\"\n Remove the features from the dataframe before returning it to strategy. 
This keeps it\n compact for Frequi purposes.\n \"\"\"\n to_keep = [\n col for col in dataframe.columns if not col.startswith(\"%\") or col.startswith(\"%%\")\n ]\n return dataframe[to_keep]\n\n def get_unique_classes_from_labels(self, dataframe: DataFrame) -> None:\n\n # self.find_features(dataframe)\n self.find_labels(dataframe)\n\n for key in self.label_list:\n if dataframe[key].dtype == object:\n self.unique_classes[key] = dataframe[key].dropna().unique()\n\n if self.unique_classes:\n for label in self.unique_classes:\n self.unique_class_list += list(self.unique_classes[label])\n\n def save_backtesting_prediction(\n self, append_df: DataFrame\n ) -> None:\n \"\"\"\n Save prediction dataframe from backtesting to feather file format\n :param append_df: dataframe for backtesting period\n \"\"\"\n full_predictions_folder = Path(self.full_path / self.backtest_predictions_folder)\n if not full_predictions_folder.is_dir():\n full_predictions_folder.mkdir(parents=True, exist_ok=True)\n\n append_df.to_feather(self.backtesting_results_path)\n\n def get_backtesting_prediction(\n self\n ) -> DataFrame:\n \"\"\"\n Get prediction dataframe from feather file format\n \"\"\"\n append_df = pd.read_feather(self.backtesting_results_path)\n return append_df\n\n def check_if_backtest_prediction_is_valid(\n self,\n len_backtest_df: int\n ) -> bool:\n \"\"\"\n Check if a backtesting prediction already exists and if the predictions\n to append have the same size as the backtesting dataframe slice\n :param length_backtesting_dataframe: Length of backtesting dataframe slice\n :return:\n :boolean: whether the prediction file is valid.\n \"\"\"\n path_to_predictionfile = Path(self.full_path /\n self.backtest_predictions_folder /\n f\"{self.model_filename}_prediction.feather\")\n self.backtesting_results_path = path_to_predictionfile\n\n file_exists = path_to_predictionfile.is_file()\n\n if file_exists:\n append_df = self.get_backtesting_prediction()\n if len(append_df) == len_backtest_df and 'date' in append_df:\n logger.info(f\"Found backtesting prediction file at {path_to_predictionfile}\")\n return True\n else:\n logger.info(\"A new backtesting prediction file is required. \"\n \"(Number of predictions is different from dataframe length or \"\n \"old prediction file version).\")\n return False\n else:\n logger.info(\n f\"Could not find backtesting prediction file at {path_to_predictionfile}\"\n )\n return False\n\n def get_full_models_path(self, config: Config) -> Path:\n \"\"\"\n Returns default FreqAI model path\n :param config: Configuration dictionary\n \"\"\"\n freqai_config: Dict[str, Any] = config[\"freqai\"]\n return Path(\n config[\"user_data_dir\"] / \"models\" / str(freqai_config.get(\"identifier\"))\n )\n\n def remove_special_chars_from_feature_names(self, dataframe: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Remove all special characters from feature strings (:)\n :param dataframe: the dataframe that just finished indicator population. (unfiltered)\n :return: dataframe with cleaned featrue names\n \"\"\"\n\n spec_chars = [':']\n for c in spec_chars:\n dataframe.columns = dataframe.columns.str.replace(c, \"\")\n\n return dataframe\n\n def buffer_timerange(self, timerange: TimeRange):\n \"\"\"\n Buffer the start and end of the timerange. This is used *after* the indicators\n are populated.\n\n The main example use is when predicting maxima and minima, the argrelextrema\n function cannot know the maxima/minima at the edges of the timerange. 
To improve\n model accuracy, it is best to compute argrelextrema on the full timerange\n and then use this function to cut off the edges (buffer) by the kernel.\n\n In another case, if the targets are set to a shifted price movement, this\n buffer is unnecessary because the shifted candles at the end of the timerange\n will be NaN and FreqAI will automatically cut those off of the training\n dataset.\n \"\"\"\n buffer = self.freqai_config[\"feature_parameters\"][\"buffer_train_data_candles\"]\n if buffer:\n timerange.stopts -= buffer * timeframe_to_seconds(self.config[\"timeframe\"])\n timerange.startts += buffer * timeframe_to_seconds(self.config[\"timeframe\"])\n\n return timerange\n\n # deprecated functions\n def normalize_data(self, data_dictionary: Dict) -> Dict[Any, Any]:\n \"\"\"\n Deprecation warning, migration assistance\n \"\"\"\n logger.warning(f\"Your custom IFreqaiModel relies on the deprecated\"\n \" data pipeline. Please update your model to use the new data pipeline.\"\n \" This can be achieved by following the migration guide at \"\n f\"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline \"\n \"We added a basic pipeline for you, but this will be removed \"\n \"in a future version.\")\n\n return data_dictionary\n\n def denormalize_labels_from_metadata(self, df: DataFrame) -> DataFrame:\n \"\"\"\n Deprecation warning, migration assistance\n \"\"\"\n logger.warning(f\"Your custom IFreqaiModel relies on the deprecated\"\n \" data pipeline. Please update your model to use the new data pipeline.\"\n \" This can be achieved by following the migration guide at \"\n f\"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline \"\n \"We added a basic pipeline for you, but this will be removed \"\n \"in a future version.\")\n\n pred_df, _, _ = self.label_pipeline.inverse_transform(df)\n\n return pred_df" }, { "identifier": "get_patched_exchange", "path": "tests/conftest.py", "snippet": "def get_patched_exchange(mocker, config, api_mock=None, id='binance',\n mock_markets=True, mock_supported_modes=True) -> Exchange:\n patch_exchange(mocker, api_mock, id, mock_markets, mock_supported_modes)\n config['exchange']['name'] = id\n try:\n exchange = ExchangeResolver.load_exchange(config, load_leverage_tiers=True)\n except ImportError:\n exchange = Exchange(config)\n return exchange" }, { "identifier": "get_patched_data_kitchen", "path": "tests/freqai/conftest.py", "snippet": "def get_patched_data_kitchen(mocker, freqaiconf):\n dk = FreqaiDataKitchen(freqaiconf)\n return dk" }, { "identifier": "get_patched_freqai_strategy", "path": "tests/freqai/conftest.py", "snippet": "def get_patched_freqai_strategy(mocker, freqaiconf):\n strategy = StrategyResolver.load_strategy(freqaiconf)\n strategy.ft_bot_start()\n\n return strategy" }, { "identifier": "make_unfiltered_dataframe", "path": "tests/freqai/conftest.py", "snippet": "def make_unfiltered_dataframe(mocker, freqai_conf):\n freqai_conf.update({\"timerange\": \"20180110-20180130\"})\n\n strategy = get_patched_freqai_strategy(mocker, freqai_conf)\n exchange = get_patched_exchange(mocker, freqai_conf)\n strategy.dp = DataProvider(freqai_conf, exchange)\n strategy.freqai_info = freqai_conf.get(\"freqai\", {})\n freqai = strategy.freqai\n freqai.live = True\n freqai.dk = FreqaiDataKitchen(freqai_conf)\n freqai.dk.live = True\n freqai.dk.pair = \"ADA/BTC\"\n data_load_timerange = TimeRange.parse_timerange(\"20180110-20180130\")\n freqai.dd.load_all_pair_histories(data_load_timerange, freqai.dk)\n\n freqai.dd.pair_dict = MagicMock()\n\n 
new_timerange = TimeRange.parse_timerange(\"20180120-20180130\")\n\n corr_dataframes, base_dataframes = freqai.dd.get_base_and_corr_dataframes(\n data_load_timerange, freqai.dk.pair, freqai.dk\n )\n\n unfiltered_dataframe = freqai.dk.use_strategy_to_populate_indicators(\n strategy, corr_dataframes, base_dataframes, freqai.dk.pair\n )\n for i in range(5):\n unfiltered_dataframe[f'constant_{i}'] = i\n\n unfiltered_dataframe = freqai.dk.slice_dataframe(new_timerange, unfiltered_dataframe)\n\n return freqai, unfiltered_dataframe" }, { "identifier": "is_mac", "path": "tests/freqai/test_freqai_interface.py", "snippet": "def is_py11() -> bool:\ndef is_arm() -> bool:\ndef can_run_model(model: str) -> None:\ndef test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,\n dbscan, float32, can_short, shuffle,\n buffer, noise):\ndef test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model, strat):\ndef test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model):\ndef test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog):\ndef test_start_backtesting_subdaily_backtest_period(mocker, freqai_conf):\ndef test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog):\ndef test_backtesting_fit_live_predictions(mocker, freqai_conf, caplog):\ndef test_plot_feature_importance(mocker, freqai_conf):\ndef test_freqai_informative_pairs(mocker, freqai_conf, timeframes, corr_pairs):\ndef test_start_set_train_queue(mocker, freqai_conf, caplog):\ndef test_get_required_data_timerange(mocker, freqai_conf):\ndef test_download_all_data_for_training(mocker, freqai_conf, caplog, tmpdir):\ndef test_get_state_info(mocker, freqai_conf, dp_exists, caplog, tickers):" } ]
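The producer-candle append logic in the DataProvider snippet above boils down to one ratio: the gap between the last locally cached candle and the first incoming candle, measured in timeframe units. A gap of exactly one candle means the update follows on seamlessly; anything larger is reported back as the candle difference. Below is a minimal standalone sketch of that check; the helper name `missing_candle_gap` and the toy data are illustrative, only the gap arithmetic mirrors the snippet.

import pandas as pd

def missing_candle_gap(existing: pd.DataFrame, incoming: pd.DataFrame, timeframe: str) -> int:
    """Return 0 when `incoming` starts at most one candle after `existing` ends,
    otherwise the gap between the two dataframes expressed in candles."""
    delta = pd.to_timedelta(timeframe)            # e.g. "5m" -> Timedelta of 5 minutes
    local_last = existing.iloc[-1]["date"]        # newest cached candle
    incoming_first = incoming.iloc[0]["date"]     # first candle of the update
    gap = (incoming_first - local_last) / delta
    return 0 if gap <= 1 else int(gap)

# Toy data: cache ends at 12:10, update starts at 12:25 on a 5m timeframe.
existing = pd.DataFrame({"date": pd.date_range("2024-01-01 12:00", periods=3, freq="5min", tz="UTC")})
incoming = pd.DataFrame({"date": pd.date_range("2024-01-01 12:25", periods=2, freq="5min", tz="UTC")})
print(missing_candle_gap(existing, incoming, "5m"))   # 3 -> the 12:15 and 12:20 candles never arrived

When the gap is one candle, the snippet trims any cached candles that overlap the incoming ones and appends; when it is larger, it returns the difference so the caller knows how many candles are missing.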
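Further down the same context block, `FreqaiDataKitchen.set_weights_higher_recent` weights training rows with a reversed exponential decay so the most recent rows dominate. The formula is short enough to reproduce standalone; the `wfactor` value below is arbitrary, chosen only to make the decay visible, not a recommended setting.

import numpy as np

def recency_weights(num_rows: int, wfactor: float) -> np.ndarray:
    # exp(-i / (wfactor * n)) decays with row age; reversing puts weight 1.0 on the newest row.
    return np.exp(-np.arange(num_rows) / (wfactor * num_rows))[::-1]

print(recency_weights(num_rows=5, wfactor=0.5).round(3))
# approximately [0.202, 0.301, 0.449, 0.670, 1.0] -- the last (newest) row gets the highest weight

A larger `wfactor` flattens the curve toward uniform weights; a smaller one discounts old rows more aggressively.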
import shutil
import pytest
from datetime import datetime, timedelta, timezone
from pathlib import Path
from unittest.mock import MagicMock

from freqtrade.configuration import TimeRange
from freqtrade.data.dataprovider import DataProvider
from freqtrade.exceptions import OperationalException
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from tests.conftest import get_patched_exchange
from tests.freqai.conftest import (get_patched_data_kitchen, get_patched_freqai_strategy,
                                   make_unfiltered_dataframe)
from tests.freqai.test_freqai_interface import is_mac
18,399
@pytest.mark.parametrize(
    "timerange, train_period_days, expected_result",
    [
        ("20220101-20220201", 30, "20211202-20220201"),
        ("20220301-20220401", 15, "20220214-20220401"),
    ],
)
def test_create_fulltimerange(
    timerange, train_period_days, expected_result, freqai_conf, mocker, caplog
):
    dk = get_patched_data_kitchen(mocker, freqai_conf)
    assert dk.create_fulltimerange(timerange, train_period_days) == expected_result
    shutil.rmtree(Path(dk.full_path))


def test_create_fulltimerange_incorrect_backtest_period(mocker, freqai_conf):
    dk = get_patched_data_kitchen(mocker, freqai_conf)
    with pytest.raises(OperationalException, match=r"backtest_period_days must be an integer"):
        dk.create_fulltimerange("20220101-20220201", 0.5)
    with pytest.raises(OperationalException, match=r"backtest_period_days must be positive"):
        dk.create_fulltimerange("20220101-20220201", -1)
    shutil.rmtree(Path(dk.full_path))


@pytest.mark.parametrize(
    "timerange, train_period_days, backtest_period_days, expected_result",
    [
        ("20220101-20220201", 30, 7, 9),
        ("20220101-20220201", 30, 0.5, 120),
        ("20220101-20220201", 10, 1, 80),
    ],
)
def test_split_timerange(
    mocker, freqai_conf, timerange, train_period_days, backtest_period_days, expected_result
):
    freqai_conf.update({"timerange": "20220101-20220401"})
    dk = get_patched_data_kitchen(mocker, freqai_conf)
    tr_list, bt_list = dk.split_timerange(timerange, train_period_days, backtest_period_days)
    assert len(tr_list) == len(bt_list) == expected_result

    with pytest.raises(
        OperationalException, match=r"train_period_days must be an integer greater than 0."
    ):
        dk.split_timerange("20220101-20220201", -1, 0.5)
    shutil.rmtree(Path(dk.full_path))


def test_check_if_model_expired(mocker, freqai_conf):
    dk = get_patched_data_kitchen(mocker, freqai_conf)
    now = datetime.now(tz=timezone.utc).timestamp()
    assert dk.check_if_model_expired(now) is False
    now = (datetime.now(tz=timezone.utc) - timedelta(hours=2)).timestamp()
    assert dk.check_if_model_expired(now) is True
    shutil.rmtree(Path(dk.full_path))


def test_filter_features(mocker, freqai_conf):
    freqai, unfiltered_dataframe = make_unfiltered_dataframe(mocker, freqai_conf)
    freqai.dk.find_features(unfiltered_dataframe)

    filtered_df, labels = freqai.dk.filter_features(
        unfiltered_dataframe,
        freqai.dk.training_features_list,
        freqai.dk.label_list,
        training_filter=True,
    )

    assert len(filtered_df.columns) == 14


def test_make_train_test_datasets(mocker, freqai_conf):
    freqai, unfiltered_dataframe = make_unfiltered_dataframe(mocker, freqai_conf)
    freqai.dk.find_features(unfiltered_dataframe)

    features_filtered, labels_filtered = freqai.dk.filter_features(
        unfiltered_dataframe,
        freqai.dk.training_features_list,
        freqai.dk.label_list,
        training_filter=True,
    )

    data_dictionary = freqai.dk.make_train_test_datasets(features_filtered, labels_filtered)

    assert data_dictionary
    assert len(data_dictionary) == 7
    assert len(data_dictionary['train_features'].index) == 1916


@pytest.mark.parametrize('model', [
    'LightGBMRegressor'
])
def test_get_full_model_path(mocker, freqai_conf, model):
    freqai_conf.update({"freqaimodel": model})
    freqai_conf.update({"timerange": "20180110-20180130"})
    freqai_conf.update({"strategy": "freqai_test_strat"})

    if is_mac():
        pytest.skip("Mac is confused during this test for unknown reasons")

    strategy = get_patched_freqai_strategy(mocker, freqai_conf)
    exchange = get_patched_exchange(mocker, freqai_conf)
    strategy.dp = DataProvider(freqai_conf, exchange)
    strategy.freqai_info = freqai_conf.get("freqai", {})
    freqai = strategy.freqai
    freqai.live = True
    freqai.dk = FreqaiDataKitchen(freqai_conf)
    freqai.dk.live = True
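The expected strings in `test_create_fulltimerange` follow directly from the start-shift inside `create_fulltimerange`: the requested start timestamp is moved back by the given number of days (times 86400 seconds) while the stop stays put; note that the test's `train_period_days` argument feeds the method's `backtest_period_days` parameter. A quick standard-library check of both parametrized cases follows; the helper only reproduces the date arithmetic, not the model-directory setup the real method performs.

from datetime import datetime, timedelta

def shifted_start(timerange: str, period_days: int) -> str:
    # new_start = start - period_days; the stop date is left unchanged.
    start_s, stop_s = timerange.split("-")
    start = datetime.strptime(start_s, "%Y%m%d") - timedelta(days=period_days)
    return f"{start:%Y%m%d}-{stop_s}"

assert shifted_start("20220101-20220201", 30) == "20211202-20220201"
assert shifted_start("20220301-20220401", 15) == "20220214-20220401"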
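`test_check_if_model_expired` exercises the age check shown in the data-kitchen snippet: elapsed hours since training are compared against the `expiration_hours` setting, and a value of 0 disables expiry entirely. For the two assertions to hold, the test configuration presumably sets `expiration_hours` somewhere between 0 and 2. A compact restatement of the rule:

from datetime import datetime, timezone

def model_expired(trained_ts: float, expiration_hours: float) -> bool:
    # expiration_hours <= 0 means models never expire.
    if expiration_hours <= 0:
        return False
    elapsed_hours = (datetime.now(tz=timezone.utc).timestamp() - trained_ts) / 3600
    return elapsed_hours > expiration_hours

now_ts = datetime.now(tz=timezone.utc).timestamp()
print(model_expired(now_ts, expiration_hours=1))              # False: freshly trained
print(model_expired(now_ts - 2 * 3600, expiration_hours=1))   # True: trained two hours ago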
timerange = TimeRange.parse_timerange("20180110-20180130")
0
2023-10-21 10:02:05+00:00
24k
yanzhh/HGERE
transformers/src/transformers/modeling_distilautomcbert.py
[ { "identifier": "gelu", "path": "transformers/src/transformers/activations.py", "snippet": "def swish(x):\ndef _gelu_python(x):\ndef gelu_new(x):\ndef get_activation(activation_string):\nACT2FN = {\n \"relu\": F.relu,\n \"swish\": swish,\n \"gelu\": gelu,\n \"tanh\": F.tanh,\n \"gelu_new\": gelu_new,\n}" }, { "identifier": "DistilBertConfig", "path": "transformers/src/transformers/configuration_distilbert.py", "snippet": "class DistilBertConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a :class:`~transformers.DistilBertModel`.\n It is used to instantiate a DistilBERT model according to the specified arguments, defining the model\n architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of\n the DistilBERT `distilbert-base-uncased <https://huggingface.co/distilbert-base-uncased>`__ architecture.\n\n Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used\n to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`\n for more information.\n\n\n Args:\n vocab_size (:obj:`int`, optional, defaults to 30522):\n Vocabulary size of the DistilBERT model. Defines the different tokens that\n can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.BertModel`.\n max_position_embeddings (:obj:`int`, optional, defaults to 512):\n The maximum sequence length that this model might ever be used with.\n Typically set this to something large just in case (e.g., 512 or 1024 or 2048).\n sinusoidal_pos_embds (:obj:`boolean`, optional, defaults to :obj:`False`):\n Whether to use sinusoidal positional embeddings.\n n_layers (:obj:`int`, optional, defaults to 6):\n Number of hidden layers in the Transformer encoder.\n n_heads (:obj:`int`, optional, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n dim (:obj:`int`, optional, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n hidden_dim (:obj:`int`, optional, defaults to 3072):\n The size of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n dropout (:obj:`float`, optional, defaults to 0.1):\n The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.\n attention_dropout (:obj:`float`, optional, defaults to 0.1):\n The dropout ratio for the attention probabilities.\n activation (:obj:`str` or :obj:`function`, optional, defaults to \"gelu\"):\n The non-linear activation function (function or string) in the encoder and pooler.\n If string, \"gelu\", \"relu\", \"swish\" and \"gelu_new\" are supported.\n initializer_range (:obj:`float`, optional, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n qa_dropout (:obj:`float`, optional, defaults to 0.1):\n The dropout probabilities used in the question answering model\n :class:`~tranformers.DistilBertForQuestionAnswering`.\n seq_classif_dropout (:obj:`float`, optional, defaults to 0.2):\n The dropout probabilities used in the sequence classification model\n :class:`~tranformers.DistilBertForSequenceClassification`.\n\n Example::\n\n from transformers import DistilBertModel, DistilBertConfig\n\n # Initializing a DistilBERT configuration\n configuration = DistilBertConfig()\n\n # Initializing a model from the configuration\n model = DistilBertModel(configuration)\n\n # Accessing the model configuration\n 
configuration = model.config\n\n Attributes:\n pretrained_config_archive_map (Dict[str, str]):\n A dictionary containing all the available pre-trained checkpoints.\n \"\"\"\n pretrained_config_archive_map = DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP\n model_type = \"distilbert\"\n\n def __init__(\n self,\n vocab_size=30522,\n max_position_embeddings=512,\n sinusoidal_pos_embds=False,\n n_layers=6,\n n_heads=12,\n dim=768,\n hidden_dim=4 * 768,\n dropout=0.1,\n attention_dropout=0.1,\n activation=\"gelu\",\n initializer_range=0.02,\n qa_dropout=0.1,\n seq_classif_dropout=0.2,\n **kwargs\n ):\n super().__init__(**kwargs)\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.sinusoidal_pos_embds = sinusoidal_pos_embds\n self.n_layers = n_layers\n self.n_heads = n_heads\n self.dim = dim\n self.hidden_dim = hidden_dim\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.activation = activation\n self.initializer_range = initializer_range\n self.qa_dropout = qa_dropout\n self.seq_classif_dropout = seq_classif_dropout\n\n @property\n def hidden_size(self):\n return self.dim\n\n @property\n def num_attention_heads(self):\n return self.n_heads\n\n @property\n def num_hidden_layers(self):\n return self.n_layers" }, { "identifier": "add_start_docstrings", "path": "transformers/src/transformers/file_utils.py", "snippet": "def add_start_docstrings(*docstr):\n def docstring_decorator(fn):\n fn.__doc__ = \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator" }, { "identifier": "add_start_docstrings_to_callable", "path": "transformers/src/transformers/file_utils.py", "snippet": "def add_start_docstrings_to_callable(*docstr):\n def docstring_decorator(fn):\n class_name = \":class:`~transformers.{}`\".format(fn.__qualname__.split(\".\")[0])\n intro = \" The {} forward method, overrides the :func:`__call__` special method.\".format(class_name)\n note = r\"\"\"\n\n .. 
note::\n Although the recipe for forward pass needs to be defined within\n this function, one should call the :class:`Module` instance afterwards\n instead of this since the former takes care of running the\n pre and post processing steps while the latter silently ignores them.\n \"\"\"\n fn.__doc__ = intro + note + \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator" }, { "identifier": "PreTrainedModel", "path": "transformers/src/transformers/modeling_utils.py", "snippet": "class PreTrainedModel(nn.Module, ModuleUtilsMixin):\n r\"\"\" Base class for all models.\n\n :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models\n as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads.\n\n Class attributes (overridden by derived classes):\n - ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.\n - ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.\n - ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:\n\n - ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,\n - ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,\n - ``path``: a path (string) to the TensorFlow checkpoint.\n\n - ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.\n \"\"\"\n config_class = None\n pretrained_model_archive_map = {}\n base_model_prefix = \"\"\n\n @property\n def dummy_inputs(self):\n \"\"\" Dummy inputs to do a forward pass in the network.\n\n Returns:\n torch.Tensor with dummy inputs\n \"\"\"\n return {\"input_ids\": torch.tensor(DUMMY_INPUTS)}\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__()\n if not isinstance(config, PretrainedConfig):\n raise ValueError(\n \"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. 
\"\n \"To create a model from a pretrained model use \"\n \"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(\n self.__class__.__name__, self.__class__.__name__\n )\n )\n # Save config in model\n self.config = config\n\n @property\n def base_model(self):\n return getattr(self, self.base_model_prefix, self)\n\n def get_input_embeddings(self):\n \"\"\"\n Returns the model's input embeddings.\n\n Returns:\n :obj:`nn.Module`:\n A torch module mapping vocabulary to hidden states.\n \"\"\"\n base_model = getattr(self, self.base_model_prefix, self)\n if base_model is not self:\n return base_model.get_input_embeddings()\n else:\n raise NotImplementedError\n\n def set_input_embeddings(self, value):\n \"\"\"\n Set model's input embeddings\n\n Args:\n value (:obj:`nn.Module`):\n A module mapping vocabulary to hidden states.\n \"\"\"\n base_model = getattr(self, self.base_model_prefix, self)\n if base_model is not self:\n base_model.set_input_embeddings(value)\n else:\n raise NotImplementedError\n\n def get_output_embeddings(self):\n \"\"\"\n Returns the model's output embeddings.\n\n Returns:\n :obj:`nn.Module`:\n A torch module mapping hidden states to vocabulary.\n \"\"\"\n return None # Overwrite for models with output embeddings\n\n def tie_weights(self):\n \"\"\"\n Tie the weights between the input embeddings and the output embeddings.\n If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning\n the weights instead.\n \"\"\"\n output_embeddings = self.get_output_embeddings()\n if output_embeddings is not None:\n if isinstance(output_embeddings, list):\n for x in output_embeddings:\n self._tie_or_clone_weights(x, self.get_input_embeddings())\n else:\n self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())\n\n def _tie_or_clone_weights(self, output_embeddings, input_embeddings):\n \"\"\" Tie or clone module weights depending of weither we are using TorchScript or not\n \"\"\"\n if self.config.torchscript:\n output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())\n else:\n output_embeddings.weight = input_embeddings.weight\n\n if hasattr(output_embeddings, \"bias\") and output_embeddings.bias is not None:\n output_embeddings.bias.data = torch.nn.functional.pad(\n output_embeddings.bias.data,\n (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]),\n \"constant\",\n 0,\n )\n if hasattr(output_embeddings, \"out_features\") and hasattr(input_embeddings, \"num_embeddings\"):\n output_embeddings.out_features = input_embeddings.num_embeddings\n\n def resize_token_embeddings(self, new_num_tokens=None):\n \"\"\" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.\n Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.\n\n Arguments:\n\n new_num_tokens: (`optional`) int:\n New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. 
Reducing the size will remove vectors from the end.\n If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model.\n\n Return: ``torch.nn.Embeddings``\n Pointer to the input tokens Embeddings Module of the model\n \"\"\"\n base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed\n model_embeds = base_model._resize_token_embeddings(new_num_tokens)\n if new_num_tokens is None:\n return model_embeds\n\n # Update base model and current model config\n self.config.vocab_size = new_num_tokens\n base_model.vocab_size = new_num_tokens\n\n # Tie weights again if needed\n self.tie_weights()\n\n return model_embeds\n\n def _resize_token_embeddings(self, new_num_tokens):\n old_embeddings = self.get_input_embeddings()\n new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)\n self.set_input_embeddings(new_embeddings)\n return self.get_input_embeddings()\n\n def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):\n \"\"\" Build a resized Embedding Module from a provided token Embedding Module.\n Increasing the size will add newly initialized vectors at the end\n Reducing the size will remove vectors from the end\n\n Args:\n new_num_tokens: (`optional`) int\n New number of tokens in the embedding matrix.\n Increasing the size will add newly initialized vectors at the end\n Reducing the size will remove vectors from the end\n If not provided or None: return the provided token Embedding Module.\n Return: ``torch.nn.Embeddings``\n Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None\n \"\"\"\n if new_num_tokens is None:\n return old_embeddings\n\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n if old_num_tokens == new_num_tokens:\n return old_embeddings\n\n # Build new embeddings\n new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)\n new_embeddings.to(old_embeddings.weight.device)\n\n # initialize all new embeddings (in particular added tokens)\n self._init_weights(new_embeddings)\n\n # Copy word embeddings from the previous weights\n num_tokens_to_copy = min(old_num_tokens, new_num_tokens)\n new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]\n\n return new_embeddings\n\n def init_weights(self):\n \"\"\" Initialize and prunes weights if needed. \"\"\"\n # Initialize weights\n self.apply(self._init_weights)\n\n # Prune heads if needed\n if self.config.pruned_heads:\n self.prune_heads(self.config.pruned_heads)\n\n # Tie weights if needed\n self.tie_weights()\n\n def prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the base model.\n\n Arguments:\n\n heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).\n E.g. 
{1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.\n \"\"\"\n # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads\n for layer, heads in heads_to_prune.items():\n union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)\n self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON\n\n self.base_model._prune_heads(heads_to_prune)\n\n def save_pretrained(self, save_directory):\n \"\"\" Save a model and its configuration file to a directory, so that it\n can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method.\n \"\"\"\n assert os.path.isdir(\n save_directory\n ), \"Saving path should be a directory where the model and configuration can be saved\"\n\n # Only save the model itself if we are using distributed training\n model_to_save = self.module if hasattr(self, \"module\") else self\n\n # Attach architecture to the config\n model_to_save.config.architectures = [model_to_save.__class__.__name__]\n\n # Save configuration file\n model_to_save.config.save_pretrained(save_directory)\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_model_file = os.path.join(save_directory, WEIGHTS_NAME)\n torch.save(model_to_save.state_dict(), output_model_file)\n logger.info(\"Model weights saved in {}\".format(output_model_file))\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n r\"\"\"Instantiate a pretrained pytorch model from a pre-trained model configuration.\n\n The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with ``model.train()``\n\n The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.\n It is up to you to train those weights with a downstream fine-tuning task.\n\n The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.\n\n Parameters:\n pretrained_model_name_or_path: either:\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n - None if you are both providing the configuration and state dictionary (resp. 
with keyword arguments ``config`` and ``state_dict``)\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) one of:\n - an instance of a class derived from :class:`~transformers.PretrainedConfig`, or\n - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`\n Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.\n - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n resume_download: (`optional`) boolean, default False:\n Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n # For example purposes. Not runnable.\n model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = BertModel.from_pretrained('./test/saved_model/') # E.g. 
model was saved using `save_pretrained('./test/saved_model/')`\n model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')\n model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)\n\n \"\"\"\n config = kwargs.pop(\"config\", None)\n state_dict = kwargs.pop(\"state_dict\", None)\n cache_dir = kwargs.pop(\"cache_dir\", None)\n from_tf = kwargs.pop(\"from_tf\", False)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n\n # Load config if we don't provide a configuration\n if not isinstance(config, PretrainedConfig):\n config_path = config if config is not None else pretrained_model_name_or_path\n config, model_kwargs = cls.config_class.from_pretrained(\n config_path,\n *model_args,\n cache_dir=cache_dir,\n return_unused_kwargs=True,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n **kwargs,\n )\n else:\n model_kwargs = kwargs\n\n # Load model\n if pretrained_model_name_or_path is not None:\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]\n elif os.path.isdir(pretrained_model_name_or_path):\n if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")):\n # Load from a TF 1.0 checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")\n elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):\n # Load from a TF 2.0 checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)\n elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):\n # Load from a PyTorch checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)\n else:\n raise EnvironmentError(\n \"Error no file named {} found in directory {} or `from_tf` set to False\".format(\n [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + \".index\"], pretrained_model_name_or_path\n )\n )\n elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):\n archive_file = pretrained_model_name_or_path\n elif os.path.isfile(pretrained_model_name_or_path + \".index\"):\n assert (\n from_tf\n ), \"We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint\".format(\n pretrained_model_name_or_path + \".index\"\n )\n archive_file = pretrained_model_name_or_path + \".index\"\n else:\n archive_file = hf_bucket_url(\n pretrained_model_name_or_path, postfix=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME)\n )\n\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(\n archive_file,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n )\n except EnvironmentError:\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n msg = \"Couldn't reach 
server at '{}' to download pretrained weights.\".format(archive_file)\n else:\n msg = (\n \"Model name '{}' was not found in model name list ({}). \"\n \"We assumed '{}' was a path or url to model weight files named one of {} but \"\n \"couldn't find any such file at this path or url.\".format(\n pretrained_model_name_or_path,\n \", \".join(cls.pretrained_model_archive_map.keys()),\n archive_file,\n [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME],\n )\n )\n raise EnvironmentError(msg)\n\n if resolved_archive_file == archive_file:\n logger.info(\"loading weights file {}\".format(archive_file))\n else:\n logger.info(\"loading weights file {} from cache at {}\".format(archive_file, resolved_archive_file))\n else:\n resolved_archive_file = None\n\n # Instantiate model.\n model = cls(config, *model_args, **model_kwargs)\n\n if state_dict is None and not from_tf:\n try:\n state_dict = torch.load(resolved_archive_file, map_location=\"cpu\")\n except Exception:\n raise OSError(\n \"Unable to load weights from pytorch checkpoint file. \"\n \"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. \"\n )\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n\n if from_tf:\n if resolved_archive_file.endswith(\".index\"):\n # Load from a TensorFlow 1.X checkpoint - provided by original authors\n model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'\n else:\n # Load from our TensorFlow 2.0 checkpoints\n try:\n from transformers import load_tf2_checkpoint_in_pytorch_model\n\n model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. 
Please see \"\n \"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n else:\n # Convert old format to new format if needed from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if \"gamma\" in key:\n new_key = key.replace(\"gamma\", \"weight\")\n if \"beta\" in key:\n new_key = key.replace(\"beta\", \"bias\")\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, \"_metadata\", None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants\n # so we need to apply the function recursively.\n def load(module: nn.Module, prefix=\"\"):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs\n )\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + \".\")\n\n # Make sure we are able to load base models as well as derived models (with heads)\n start_prefix = \"\"\n model_to_load = model\n if not hasattr(model, cls.base_model_prefix) and any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n start_prefix = cls.base_model_prefix + \".\"\n if hasattr(model, cls.base_model_prefix) and not any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n model_to_load = getattr(model, cls.base_model_prefix)\n\n load(model_to_load, prefix=start_prefix)\n if len(missing_keys) > 0:\n logger.info(\n \"Weights of {} not initialized from pretrained model: {}\".format(\n model.__class__.__name__, missing_keys\n )\n )\n if len(unexpected_keys) > 0:\n logger.info(\n \"Weights from pretrained model not used in {}: {}\".format(\n model.__class__.__name__, unexpected_keys\n )\n )\n if len(error_msgs) > 0:\n raise RuntimeError(\n \"Error(s) in loading state_dict for {}:\\n\\t{}\".format(\n model.__class__.__name__, \"\\n\\t\".join(error_msgs)\n )\n )\n\n model.tie_weights() # make sure word embedding weights are still tied if needed\n\n # Set model in evaluation mode to desactivate DropOut modules by default\n model.eval()\n\n if output_loading_info:\n loading_info = {\"missing_keys\": missing_keys, \"unexpected_keys\": unexpected_keys, \"error_msgs\": error_msgs}\n return model, loading_info\n\n return model\n\n def prepare_inputs_for_generation(self, input_ids, **kwargs):\n return {\"input_ids\": input_ids}\n\n def _do_output_past(self, outputs):\n has_output_past = hasattr(self.config, \"output_past\") and self.config.output_past\n has_mem_len = hasattr(self.config, \"mem_len\") and self.config.mem_len\n\n if has_output_past and not has_mem_len and len(outputs) > 1:\n return True\n elif has_mem_len and self.config.mem_len > 0 and len(outputs) > 1:\n return True\n\n return False\n\n @torch.no_grad()\n def generate(\n self,\n input_ids=None,\n max_length=None,\n do_sample=True,\n num_beams=None,\n temperature=None,\n top_k=None,\n top_p=None,\n repetition_penalty=None,\n bos_token_id=None,\n pad_token_id=None,\n eos_token_ids=None,\n length_penalty=None,\n num_return_sequences=None,\n ):\n r\"\"\" Generates sequences for models with a LM head. 
The method currently supports greedy or penalized greedy decoding, sampling with top-k or nucleus sampling\n and beam-search.\n\n Adapted in part from `Facebook's XLM beam search code`_.\n\n .. _`Facebook's XLM beam search code`:\n https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529\n\n\n Parameters:\n\n input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`\n The sequence used as a prompt for the generation. If `None` the method initializes\n it as an empty `torch.LongTensor` of shape `(1,)`.\n\n max_length: (`optional`) int\n The max length of the sequence to be generated. Between 1 and infinity. Default to 20.\n\n do_sample: (`optional`) bool\n If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `True`.\n\n num_beams: (`optional`) int\n Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.\n\n temperature: (`optional`) float\n The value used to module the next token probabilities. Must be strictely positive. Default to 1.0.\n\n top_k: (`optional`) int\n The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.\n\n top_p: (`optional`) float\n The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.\n\n repetition_penalty: (`optional`) float\n The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.\n\n bos_token_id: (`optional`) int\n Beginning of sentence token if no prompt is provided. Default to 0.\n\n eos_token_ids: (`optional`) int or list of int\n End of sequence token or list of tokens to stop the generation. Default to 0.\n length_penalty: (`optional`) float\n Exponential penalty to the length. Default to 1.\n\n num_return_sequences: (`optional`) int\n The number of independently computed returned sequences for each element in the batch. 
Default to 1.\n\n Return:\n\n output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`\n sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`\n\n Examples::\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n outputs = model.generate(max_length=40, bos_token_id=tokenizer.bos_token_id, eos_token_ids=tokenizer.eos_token_id, do_sample=False) # do greedy decoding\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer\n model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context\n outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, bos_token_id=tokenizer.bos_token_id, pad_token_id=tokenizer.pad_token_id, eos_token_ids=tokenizer.eos_token_id, num_return_sequences=3) # 3 generate sequences using by sampling\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer\n model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.\n input_context = 'Legal My neighbor is' # \"Legal\" is one of the control codes for ctrl\n input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n \"\"\"\n\n # We cannot generate if the model does not have a LM head\n if self.get_output_embeddings() is None:\n raise AttributeError(\n \"You tried to generate sequences with a model that does not have a LM Head.\"\n \"Please use another model class (e.g. 
`OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`)\"\n )\n\n max_length = max_length if max_length is not None else self.config.max_length\n do_sample = do_sample if do_sample is not None else self.config.do_sample\n num_beams = num_beams if num_beams is not None else self.config.num_beams\n temperature = temperature if temperature is not None else self.config.temperature\n top_k = top_k if top_k is not None else self.config.top_k\n top_p = top_p if top_p is not None else self.config.top_p\n repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty\n bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id\n pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id\n eos_token_ids = eos_token_ids if eos_token_ids is not None else self.config.eos_token_ids\n length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty\n num_return_sequences = (\n num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences\n )\n\n if input_ids is not None:\n batch_size = input_ids.shape[0] # overriden by the input batch_size\n else:\n batch_size = 1\n if isinstance(eos_token_ids, int):\n eos_token_ids = [eos_token_ids]\n\n assert isinstance(max_length, int) and max_length > 0, \"`max_length` should be a strictely positive integer.\"\n assert isinstance(do_sample, bool), \"`do_sample` should be a boolean.\"\n assert isinstance(num_beams, int) and num_beams > 0, \"`num_beams` should be a strictely positive integer.\"\n assert temperature > 0, \"`temperature` should be strictely positive.\"\n assert isinstance(top_k, int) and top_k >= 0, \"`top_k` should be a positive integer.\"\n assert 0 <= top_p <= 1, \"`top_p` should be between 0 and 1.\"\n assert repetition_penalty >= 1.0, \"`repetition_penalty` should be >= 1.\"\n assert input_ids is not None or (\n isinstance(bos_token_id, int) and bos_token_id >= 0\n ), \"If input_ids is not defined, `bos_token_id` should be a positive integer.\"\n assert pad_token_id is None or (\n isinstance(pad_token_id, int) and (pad_token_id >= 0)\n ), \"`pad_token_id` should be a positive integer.\"\n assert (eos_token_ids is None) or (\n isinstance(eos_token_ids, (list, tuple)) and ((isinstance(e, int) and e >= 0) for e in eos_token_ids)\n ), \"`eos_token_ids` should be a positive integer or a list/tuple of positive integers.\"\n assert length_penalty > 0, \"`length_penalty` should be strictely positive.\"\n assert (\n isinstance(num_return_sequences, int) and num_return_sequences > 0\n ), \"`num_return_sequences` should be a strictely positive integer.\"\n\n if input_ids is None:\n assert isinstance(bos_token_id, int) and bos_token_id >= 0, (\n \"you should either supply a context to complete as `input_ids` input \"\n \"or a `bos_token_id` (integer >= 0) as a first token to start the generation.\"\n )\n input_ids = torch.full(\n (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device\n )\n else:\n assert input_ids.dim() == 2, \"Input prompt should be of shape (batch_size, sequence length).\"\n\n if pad_token_id is None and eos_token_ids is not None:\n logger.warning(\n \"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence\".format(eos_token_ids[0])\n )\n pad_token_id = eos_token_ids[0]\n\n # current position and vocab size\n cur_len = input_ids.shape[1]\n vocab_size = 
self.config.vocab_size\n\n if num_return_sequences != 1:\n # Expand input to num return sequences\n input_ids = input_ids.unsqueeze(1).expand(batch_size, num_return_sequences, cur_len)\n input_ids = input_ids.contiguous().view(\n batch_size * num_return_sequences, cur_len\n ) # (batch_size * num_return_sequences, cur_len)\n effective_batch_size = batch_size * num_return_sequences\n else:\n effective_batch_size = batch_size\n\n if num_beams > 1:\n output = self._generate_beam_search(\n input_ids,\n cur_len,\n max_length,\n do_sample,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n pad_token_id,\n eos_token_ids,\n effective_batch_size,\n length_penalty,\n num_beams,\n vocab_size,\n )\n else:\n output = self._generate_no_beam_search(\n input_ids,\n cur_len,\n max_length,\n do_sample,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n pad_token_id,\n eos_token_ids,\n effective_batch_size,\n )\n\n return output\n\n def _generate_no_beam_search(\n self,\n input_ids,\n cur_len,\n max_length,\n do_sample,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n pad_token_id,\n eos_token_ids,\n batch_size,\n ):\n \"\"\" Generate sequences for each example without beam search (num_beams == 1).\n All returned sequence are generated independantly.\n \"\"\"\n # current position / max lengths / length of generated sentences / unfinished sentences\n unfinished_sents = input_ids.new(batch_size).fill_(1)\n sent_lengths = input_ids.new(batch_size).fill_(max_length)\n\n past = None\n\n while cur_len < max_length:\n model_inputs = self.prepare_inputs_for_generation(input_ids, past=past)\n outputs = self(**model_inputs)\n next_token_logits = outputs[0][:, -1, :]\n\n # if model has past, then set the past variable to speed up decoding\n if self._do_output_past(outputs):\n past = outputs[1]\n\n # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)\n if repetition_penalty != 1.0:\n for i in range(batch_size):\n for previous_token in set(input_ids[i].tolist()):\n # if score < 0 then repetition penalty has to multiplied to reduce the previous token probability\n if next_token_logits[i, previous_token] < 0:\n next_token_logits[i, previous_token] *= repetition_penalty\n else:\n next_token_logits[i, previous_token] /= repetition_penalty\n\n if do_sample:\n # Temperature (higher temperature => more likely to sample low probability tokens)\n if temperature != 1.0:\n next_token_logits = next_token_logits / temperature\n # Top-p/top-k filtering\n next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)\n # Sample\n next_token = torch.multinomial(F.softmax(next_token_logits, dim=-1), num_samples=1).squeeze(1)\n else:\n # Greedy decoding\n next_token = torch.argmax(next_token_logits, dim=-1)\n\n # update generations and finished sentences\n if eos_token_ids is not None:\n # pad finished sentences if eos_token_ids exist\n tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)\n else:\n tokens_to_add = next_token\n\n input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)\n\n if eos_token_ids is not None:\n for eos_token_id in eos_token_ids:\n eos_in_sents = tokens_to_add == eos_token_id\n # if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length\n is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool()\n sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len + 1)\n # unfinished_sents is set to zero if eos in 
sentence\n unfinished_sents.mul_((~eos_in_sents).long())\n\n cur_len = cur_len + 1\n\n # stop when there is a </s> in each sentence, or if we exceed the maximul length\n if unfinished_sents.max() == 0:\n break\n\n # if there are different sentences lengths in the batch, some batches have to be padded\n if sent_lengths.min().item() != sent_lengths.max().item():\n assert pad_token_id is not None, \"`Pad_token_id` has to be defined if batches have different lengths\"\n # finished sents are filled with pad_token\n decoded = input_ids.new(batch_size, sent_lengths.max().item()).fill_(pad_token_id)\n else:\n decoded = input_ids\n\n for hypo_idx, hypo in enumerate(input_ids):\n decoded[hypo_idx, : sent_lengths[hypo_idx]] = hypo[: sent_lengths[hypo_idx]]\n\n return decoded\n\n def _generate_beam_search(\n self,\n input_ids,\n cur_len,\n max_length,\n do_sample,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n pad_token_id,\n eos_token_ids,\n batch_size,\n length_penalty,\n num_beams,\n vocab_size,\n ):\n \"\"\" Generate sequences for each example with beam search.\n \"\"\"\n # Expand input to num beams\n input_ids = input_ids.unsqueeze(1).expand(batch_size, num_beams, cur_len)\n input_ids = input_ids.contiguous().view(batch_size * num_beams, cur_len) # (batch_size * num_beams, cur_len)\n\n # generated hypotheses\n generated_hyps = [\n BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=False) for _ in range(batch_size)\n ]\n\n # scores for each sentence in the beam\n beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)\n beam_scores[:, 1:] = -1e9\n beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)\n\n # cache compute states\n past = None\n\n # done sentences\n done = [False for _ in range(batch_size)]\n\n while cur_len < max_length:\n model_inputs = self.prepare_inputs_for_generation(input_ids, past=past)\n outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)\n scores = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)\n\n # if model has past, then set the past variable to speed up decoding\n if self._do_output_past(outputs):\n past = outputs[1]\n\n # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)\n if repetition_penalty != 1.0:\n for i in range(batch_size * num_beams):\n for previous_token in set(input_ids[i].tolist()):\n # if score < 0 then repetition penalty has to multiplied to reduce the previous token probability\n if scores[i, previous_token] < 0:\n scores[i, previous_token] *= repetition_penalty\n else:\n scores[i, previous_token] /= repetition_penalty\n\n if do_sample:\n # Temperature (higher temperature => more likely to sample low probability tokens)\n if temperature != 1.0:\n scores = scores / temperature\n # Top-p/top-k filtering\n scores = top_k_top_p_filtering(\n scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2\n ) # (batch_size * num_beams, vocab_size)\n # Sample 2 next words for each beam (so we have some spare tokens and match output of greedy beam search)\n next_words = torch.multinomial(F.softmax(scores, dim=-1), num_samples=2) # (batch_size * num_beams, 2)\n # Compute next scores\n _scores = F.log_softmax(scores, dim=-1) # (batch_size * num_beams, vocab_size)\n _scores = torch.gather(_scores, -1, next_words) # (batch_size * num_beams, 2)\n next_scores = _scores + beam_scores[:, None].expand_as(_scores) # (batch_size * num_beams, 2)\n # Match shape of greedy beam search\n next_words = next_words.view(batch_size, 2 * num_beams) # 
(batch_size, 2 * num_beams)\n next_scores = next_scores.view(batch_size, 2 * num_beams) # (batch_size, 2 * num_beams)\n else:\n # do greedy beam search\n scores = F.log_softmax(scores, dim=-1) # (batch_size * num_beams, vocab_size)\n assert scores.size() == (batch_size * num_beams, vocab_size)\n # Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product)\n _scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)\n # re-organize to group the beam together (we are keeping top hypothesis accross beams)\n _scores = _scores.view(batch_size, num_beams * vocab_size) # (batch_size, num_beams * vocab_size)\n next_scores, next_words = torch.topk(_scores, 2 * num_beams, dim=1, largest=True, sorted=True)\n\n assert next_scores.size() == next_words.size() == (batch_size, 2 * num_beams)\n\n # next batch beam content\n # list of (batch_size * num_beams) tuple(next hypothesis score, next word, current position in the batch)\n next_batch_beam = []\n\n # for each sentence\n for batch_idx in range(batch_size):\n\n # if we are done with this sentence\n done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(\n next_scores[batch_idx].max().item()\n )\n if done[batch_idx]:\n assert (\n len(generated_hyps[batch_idx]) >= num_beams\n ), \"Batch can only be done if at least {} beams have been generated\".format(num_beams)\n assert (\n eos_token_ids is not None and pad_token_id is not None\n ), \"generated beams >= num_beams -> eos_token_id and pad_token have to be defined\"\n next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch\n continue\n\n # next sentence beam content\n next_sent_beam = []\n\n # next words for this sentence\n for idx, score in zip(next_words[batch_idx], next_scores[batch_idx]):\n\n # get beam and word IDs\n beam_id = idx // vocab_size\n word_id = idx % vocab_size\n\n # add to generated hypotheses if end of sentence or last iteration\n if eos_token_ids is not None and word_id.item() in eos_token_ids:\n generated_hyps[batch_idx].add(\n input_ids[batch_idx * num_beams + beam_id, :cur_len].clone(), score.item()\n )\n else:\n # add next predicted word if it is not eos_token\n next_sent_beam.append((score, word_id, batch_idx * num_beams + beam_id))\n\n # the beam for next step is full\n if len(next_sent_beam) == num_beams:\n break\n\n # update next beam content\n assert len(next_sent_beam) == num_beams, \"Beam should always be full\"\n next_batch_beam.extend(next_sent_beam)\n assert len(next_batch_beam) == num_beams * (batch_idx + 1)\n\n # sanity check / prepare next batch\n assert len(next_batch_beam) == batch_size * num_beams\n beam_scores = beam_scores.new([x[0] for x in next_batch_beam])\n beam_words = input_ids.new([x[1] for x in next_batch_beam])\n beam_idx = input_ids.new([x[2] for x in next_batch_beam])\n\n # re-order batch\n input_ids = input_ids[beam_idx, :]\n input_ids = torch.cat([input_ids, beam_words.unsqueeze(1)], dim=-1)\n\n # re-order internal states\n if past:\n reordered_past = []\n for layer_past in past:\n # get the correct batch idx from layer past batch dim\n # batch dim of `past` and `mems` is at 2nd position\n reordered_layer_past = [layer_past[:, i].unsqueeze(1).clone().detach() for i in beam_idx]\n reordered_layer_past = torch.cat(reordered_layer_past, dim=1)\n # check that shape matches\n assert reordered_layer_past.shape == layer_past.shape\n reordered_past.append(reordered_layer_past)\n past = tuple(reordered_past)\n\n # update current 
length\n cur_len = cur_len + 1\n\n # stop when we are done with each sentence\n if all(done):\n break\n\n for batch_idx in range(batch_size):\n # Add all open beam hypothesis to generated_hyps\n if not done[batch_idx]:\n for idx, score in zip(next_words[batch_idx], next_scores[batch_idx]):\n\n # get beam and word IDs\n beam_id = idx // vocab_size\n word_id = idx % vocab_size\n generated_hyps[batch_idx].add(\n input_ids[batch_idx * num_beams + beam_id, :cur_len].clone(), score.item()\n )\n\n # select the best hypotheses\n sent_lengths = input_ids.new(batch_size)\n best = []\n\n for i, hypotheses in enumerate(generated_hyps):\n best_hyp = max(hypotheses.beams, key=lambda x: x[0])[1]\n sent_lengths[i] = len(best_hyp)\n best.append(best_hyp)\n\n # shorter batches are filled with pad_token\n if sent_lengths.min().item() != sent_lengths.max().item():\n assert pad_token_id is not None, \"`Pad_token_id` has to be defined\"\n sent_max_len = min(sent_lengths.max().item() + 1, max_length)\n decoded = input_ids.new(batch_size, sent_max_len).fill_(pad_token_id)\n\n # fill with hypothesis and eos_token_id if necessary\n for i, hypo in enumerate(best):\n decoded[i, : sent_lengths[i]] = hypo\n if sent_lengths[i] < max_length:\n decoded[i, sent_lengths[i]] = eos_token_ids[0]\n else:\n # none of the hypotheses have an eos_token\n assert (len(hypo) == max_length for hypo in best)\n decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device)\n\n return decoded" }, { "identifier": "prune_linear_layer", "path": "transformers/src/transformers/modeling_utils.py", "snippet": "def prune_linear_layer(layer, index, dim=0):\n \"\"\" Prune a linear layer (a model parameters) to keep only entries in index.\n Return the pruned layer as a new layer with requires_grad=True.\n Used to remove heads.\n \"\"\"\n index = index.to(layer.weight.device)\n W = layer.weight.index_select(dim, index).clone().detach()\n if layer.bias is not None:\n if dim == 1:\n b = layer.bias.clone().detach()\n else:\n b = layer.bias[index].clone().detach()\n new_size = list(layer.weight.size())\n new_size[dim] = len(index)\n new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)\n new_layer.weight.requires_grad = False\n new_layer.weight.copy_(W.contiguous())\n new_layer.weight.requires_grad = True\n if layer.bias is not None:\n new_layer.bias.requires_grad = False\n new_layer.bias.copy_(b.contiguous())\n new_layer.bias.requires_grad = True\n return new_layer" } ]
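The generate() snippet in the context list above picks each next token by first applying a CTRL-style repetition penalty and then top-k/top-p filtering before sampling. A self-contained sketch of that per-step logic is shown below; `top_k_top_p_filter` is a simplified stand-in for the library's `top_k_top_p_filtering` helper (referenced but not quoted in the record), not its exact implementation, and the vocabulary size and penalty value are arbitrary.

```python
import torch
import torch.nn.functional as F

def apply_repetition_penalty(logits, generated_ids, penalty=1.2):
    # CTRL-style penalty: make tokens that were already generated less likely.
    for token in set(generated_ids.tolist()):
        if logits[token] < 0:
            logits[token] *= penalty   # negative score: multiply to push it further down
        else:
            logits[token] /= penalty   # positive score: divide to shrink it
    return logits

def top_k_top_p_filter(logits, top_k=50, top_p=0.95):
    # Keep only the top_k highest logits.
    if top_k > 0:
        kth_best = torch.topk(logits, min(top_k, logits.size(-1))).values[-1]
        logits[logits < kth_best] = float("-inf")
    # Nucleus (top-p) filtering on what remains.
    if top_p < 1.0:
        sorted_logits, sorted_idx = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        remove = cum_probs > top_p
        remove[1:] = remove[:-1].clone()   # shift right: always keep the top token
        remove[0] = False
        logits[sorted_idx[remove]] = float("-inf")
    return logits

# Toy next-token step for a single sequence.
torch.manual_seed(0)
vocab_size = 10
logits = torch.randn(vocab_size)
generated = torch.tensor([2, 5, 2])          # tokens emitted so far
logits = apply_repetition_penalty(logits, generated)
logits = top_k_top_p_filter(logits, top_k=5, top_p=0.9)
next_token = torch.multinomial(F.softmax(logits, dim=-1), num_samples=1)
print(int(next_token))
```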
import copy
import logging
import math

import numpy as np
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

from .activations import gelu
from .configuration_distilbert import DistilBertConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import PreTrainedModel, prune_linear_layer
15,230
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert) """ logger = logging.getLogger(__name__) DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP = { "distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-pytorch_model.bin", "distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-pytorch_model.bin", "distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-cased-pytorch_model.bin", "distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-cased-distilled-squad-pytorch_model.bin", "distilbert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-pytorch_model.bin", "distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-multilingual-cased-pytorch_model.bin", "distilbert-base-uncased-finetuned-sst-2-english": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-finetuned-sst-2-english-pytorch_model.bin", } # UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE # def create_sinusoidal_embeddings(n_pos, dim, out): position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() out.requires_grad = False class Embeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim) if config.sinusoidal_pos_embds: create_sinusoidal_embeddings( n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight ) self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12) self.dropout = nn.Dropout(config.dropout) def forward(self, input_ids): """ Parameters ---------- input_ids: torch.tensor(bs, max_seq_length) The token ids to embed. 
Outputs ------- embeddings: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type embeddings) """ seq_length = input_ids.size(1) position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length) word_embeddings = self.word_embeddings(input_ids) # (bs, max_seq_length, dim) position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim) embeddings = word_embeddings + position_embeddings # (bs, max_seq_length, dim) embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim) embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim) return embeddings class MultiHeadSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.n_heads = config.n_heads self.dim = config.dim self.dropout = nn.Dropout(p=config.attention_dropout) self.output_attentions = config.output_attentions assert self.dim % self.n_heads == 0 self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.pruned_heads = set() def prune_heads(self, heads): attention_head_size = self.dim // self.n_heads if len(heads) == 0: return mask = torch.ones(self.n_heads, attention_head_size) heads = set(heads) - self.pruned_heads for head in heads: head -= sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers
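The cropped code above builds fixed sinusoidal position embeddings by filling an nn.Embedding weight with interleaved sine and cosine values. The short check below reproduces that table as a plain tensor (rather than writing into a Parameter in place) just to make the shape and the even/odd sin–cos layout visible; the sizes are arbitrary.

```python
import numpy as np
import torch

def sinusoidal_table(n_pos, dim):
    # Same recipe as create_sinusoidal_embeddings: sin on even dims, cos on odd dims.
    position_enc = np.array(
        [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
    )
    out = torch.zeros(n_pos, dim)
    out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
    out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
    return out

table = sinusoidal_table(n_pos=512, dim=64)
print(table.shape)    # torch.Size([512, 64])
print(table[0, :4])   # position 0: sin(0)=0 on even dims, cos(0)=1 on odd dims
```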
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert) """ logger = logging.getLogger(__name__) DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP = { "distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-pytorch_model.bin", "distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-pytorch_model.bin", "distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-cased-pytorch_model.bin", "distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-cased-distilled-squad-pytorch_model.bin", "distilbert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-pytorch_model.bin", "distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-multilingual-cased-pytorch_model.bin", "distilbert-base-uncased-finetuned-sst-2-english": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-finetuned-sst-2-english-pytorch_model.bin", } # UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE # def create_sinusoidal_embeddings(n_pos, dim, out): position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() out.requires_grad = False class Embeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim) if config.sinusoidal_pos_embds: create_sinusoidal_embeddings( n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight ) self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12) self.dropout = nn.Dropout(config.dropout) def forward(self, input_ids): """ Parameters ---------- input_ids: torch.tensor(bs, max_seq_length) The token ids to embed. 
Outputs ------- embeddings: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type embeddings) """ seq_length = input_ids.size(1) position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length) word_embeddings = self.word_embeddings(input_ids) # (bs, max_seq_length, dim) position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim) embeddings = word_embeddings + position_embeddings # (bs, max_seq_length, dim) embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim) embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim) return embeddings class MultiHeadSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.n_heads = config.n_heads self.dim = config.dim self.dropout = nn.Dropout(p=config.attention_dropout) self.output_attentions = config.output_attentions assert self.dim % self.n_heads == 0 self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.pruned_heads = set() def prune_heads(self, heads): attention_head_size = self.dim // self.n_heads if len(heads) == 0: return mask = torch.ones(self.n_heads, attention_head_size) heads = set(heads) - self.pruned_heads for head in heads: head -= sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers
self.q_lin = prune_linear_layer(self.q_lin, index)
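The gold next line calls prune_linear_layer on the query projection. Combining the prune_linear_layer snippet from the context list with the head-mask construction visible in prune_heads, a runnable toy example of dropping one attention head looks roughly like the sketch below. The helper is re-implemented in simplified form (torch.no_grad() instead of toggling requires_grad), and the 12 heads of size 64 are an assumption about the configuration rather than something stated in the record.

```python
import torch
import torch.nn as nn

def prune_linear_layer(layer, index, dim=0):
    # Keep only the rows (dim=0) or columns (dim=1) of `layer` listed in `index`,
    # mirroring the helper quoted in the context list above.
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    b = None
    if layer.bias is not None:
        b = layer.bias.clone().detach() if dim == 1 else layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None)
    with torch.no_grad():
        new_layer.weight.copy_(W)
        if b is not None:
            new_layer.bias.copy_(b)
    return new_layer

n_heads, head_size = 12, 64          # 12 * 64 = 768 (assumed, as in distilbert-base)
dim = n_heads * head_size
heads_to_prune = {3}                 # drop head 3, keep the other 11

# Build the boolean mask over output units the same way prune_heads does.
mask = torch.ones(n_heads, head_size)
for head in heads_to_prune:
    mask[head] = 0
index = torch.arange(dim)[mask.view(-1).eq(1)].long()

q_lin = nn.Linear(dim, dim)
q_lin = prune_linear_layer(q_lin, index)   # same call shape as the gold next_line
print(q_lin.weight.shape)                  # torch.Size([704, 768]): 11 heads remain
```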
5
2023-10-15 02:31:09+00:00
24k
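Taken together, the fields above make up one retrieval-style code-completion example: candidate context snippets, the in-file prefix, the gold next line, and the index of the snippet that supplies it. The sketch below shows one hypothetical way to score a prediction against such a record; the dictionary keys and the `model_complete` callable are assumptions about how a record would be loaded, not part of the dataset itself.

```python
def exact_match(prediction: str, gold: str) -> bool:
    # Compare one predicted line with the gold next line, ignoring surrounding whitespace.
    return prediction.strip() == gold.strip()

def score_record(record: dict, model_complete) -> dict:
    # Build a prompt from the retrieved snippets plus the cropped file prefix (assumed keys).
    snippets = [c["snippet"] for c in record["context"]]
    prompt = "\n\n".join(snippets) + "\n\n" + record["cropped_code"]
    completion = model_complete(prompt)
    prediction = completion.splitlines()[0] if completion else ""
    return {
        "exact_match": exact_match(prediction, record["next_line"]),
        "gold_snippet": record["context"][record["gold_snippet_index"]]["identifier"],
    }

if __name__ == "__main__":
    dummy_record = {
        "context": [{"identifier": "prune_linear_layer", "snippet": "def prune_linear_layer(...): ..."}],
        "cropped_code": "        # Prune linear layers\n",
        "next_line": "self.q_lin = prune_linear_layer(self.q_lin, index)",
        "gold_snippet_index": 0,
    }
    stub = lambda prompt: "self.q_lin = prune_linear_layer(self.q_lin, index)"
    print(score_record(dummy_record, stub))  # {'exact_match': True, 'gold_snippet': 'prune_linear_layer'}
```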
akashgreninja/GreSec
backend/venv/lib/python3.10/site-packages/pydantic/type_adapter.py
[ { "identifier": "_config", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_config.py", "snippet": "DEPRECATION_MESSAGE = 'Support for class-based `config` is deprecated, use ConfigDict instead.'\nV2_REMOVED_KEYS = {\n 'allow_mutation',\n 'error_msg_templates',\n 'fields',\n 'getter_dict',\n 'smart_union',\n 'underscore_attrs_are_private',\n 'json_loads',\n 'json_dumps',\n 'copy_on_model_validation',\n 'post_init_call',\n}\nV2_RENAMED_KEYS = {\n 'allow_population_by_field_name': 'populate_by_name',\n 'anystr_lower': 'str_to_lower',\n 'anystr_strip_whitespace': 'str_strip_whitespace',\n 'anystr_upper': 'str_to_upper',\n 'keep_untouched': 'ignored_types',\n 'max_anystr_length': 'str_max_length',\n 'min_anystr_length': 'str_min_length',\n 'orm_mode': 'from_attributes',\n 'schema_extra': 'json_schema_extra',\n 'validate_all': 'validate_default',\n}\nclass ConfigWrapper:\nclass ConfigWrapperStack:\n def __init__(self, config: ConfigDict | dict[str, Any] | type[Any] | None, *, check: bool = True):\n def for_model(cls, bases: tuple[type[Any], ...], namespace: dict[str, Any], kwargs: dict[str, Any]) -> Self:\n def __getattr__(self, name: str) -> Any:\n def core_config(self, obj: Any) -> core_schema.CoreConfig:\n def dict_not_none(**kwargs: Any) -> Any:\n def __repr__(self):\n def __init__(self, config_wrapper: ConfigWrapper):\n def tail(self) -> ConfigWrapper:\n def push(self, config_wrapper: ConfigWrapper | ConfigDict | None) -> ContextManager[None]:\n def _context_manager() -> Iterator[None]:\ndef prepare_config(config: ConfigDict | dict[str, Any] | type[Any] | None) -> ConfigDict:\ndef check_deprecated(config_dict: ConfigDict) -> None:" }, { "identifier": "_core_utils", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_core_utils.py", "snippet": "_CORE_SCHEMA_FIELD_TYPES = {'typed-dict-field', 'dataclass-field', 'model-field', 'computed-field'}\n_FUNCTION_WITH_INNER_SCHEMA_TYPES = {'function-before', 'function-after', 'function-wrap'}\n_LIST_LIKE_SCHEMA_WITH_ITEMS_TYPES = {'list', 'tuple-variable', 'set', 'frozenset'}\n_DEFINITIONS_CACHE_METADATA_KEY = 'pydantic.definitions_cache'\nNEEDS_APPLY_DISCRIMINATED_UNION_METADATA_KEY = 'pydantic.internal.needs_apply_discriminated_union'\nHAS_INVALID_SCHEMAS_METADATA_KEY = 'pydantic.internal.invalid'\nT = TypeVar('T')\ndef is_core_schema(\n schema: CoreSchemaOrField,\n) -> TypeGuard[CoreSchema]:\ndef is_core_schema_field(\n schema: CoreSchemaOrField,\n) -> TypeGuard[CoreSchemaField]:\ndef is_function_with_inner_schema(\n schema: CoreSchemaOrField,\n) -> TypeGuard[FunctionSchemaWithInnerSchema]:\ndef is_list_like_schema_with_items_schema(\n schema: CoreSchema,\n) -> TypeGuard[\ndef get_type_ref(type_: type[Any], args_override: tuple[type[Any], ...] 
| None = None) -> str:\ndef get_ref(s: core_schema.CoreSchema) -> None | str:\ndef collect_definitions(schema: core_schema.CoreSchema) -> dict[str, core_schema.CoreSchema]:\n def _record_valid_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema:\ndef define_expected_missing_refs(\n schema: core_schema.CoreSchema, allowed_missing_refs: set[str]\n) -> core_schema.CoreSchema | None:\ndef collect_invalid_schemas(schema: core_schema.CoreSchema) -> bool:\n def _is_schema_valid(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema:\n def __init__(self):\n def _build_schema_type_to_method(self) -> dict[core_schema.CoreSchemaType, Recurse]:\n def walk(self, schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema:\n def _walk(self, schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema:\n def _handle_other_schemas(self, schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema:\n def _handle_ser_schemas(self, ser_schema: core_schema.SerSchema, f: Walk) -> core_schema.SerSchema:\n def handle_definitions_schema(self, schema: core_schema.DefinitionsSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_list_schema(self, schema: core_schema.ListSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_set_schema(self, schema: core_schema.SetSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_frozenset_schema(self, schema: core_schema.FrozenSetSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_generator_schema(self, schema: core_schema.GeneratorSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_tuple_variable_schema(\n self, schema: core_schema.TupleVariableSchema | core_schema.TuplePositionalSchema, f: Walk\n ) -> core_schema.CoreSchema:\n def handle_tuple_positional_schema(\n self, schema: core_schema.TupleVariableSchema | core_schema.TuplePositionalSchema, f: Walk\n ) -> core_schema.CoreSchema:\n def handle_dict_schema(self, schema: core_schema.DictSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_function_schema(self, schema: AnyFunctionSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_union_schema(self, schema: core_schema.UnionSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_tagged_union_schema(self, schema: core_schema.TaggedUnionSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_chain_schema(self, schema: core_schema.ChainSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_lax_or_strict_schema(self, schema: core_schema.LaxOrStrictSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_json_or_python_schema(self, schema: core_schema.JsonOrPythonSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_model_fields_schema(self, schema: core_schema.ModelFieldsSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_typed_dict_schema(self, schema: core_schema.TypedDictSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_dataclass_args_schema(self, schema: core_schema.DataclassArgsSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_arguments_schema(self, schema: core_schema.ArgumentsSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_call_schema(self, schema: core_schema.CallSchema, f: Walk) -> core_schema.CoreSchema:\ndef walk_core_schema(schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema:\ndef simplify_schema_references(schema: core_schema.CoreSchema) -> core_schema.CoreSchema: # noqa: C901\n def collect_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema:\n def count_refs(s: core_schema.CoreSchema, recurse: Recurse) -> 
core_schema.CoreSchema:\n def can_be_inlined(s: core_schema.DefinitionReferenceSchema, ref: str) -> bool:\n def inline_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema:\ndef _strip_metadata(schema: CoreSchema) -> CoreSchema:\n def strip_metadata(s: CoreSchema, recurse: Recurse) -> CoreSchema:\ndef pretty_print_core_schema(\n schema: CoreSchema,\n include_metadata: bool = False,\n) -> None:\ndef validate_core_schema(schema: CoreSchema) -> CoreSchema:\nclass _WalkCoreSchema:" }, { "identifier": "_discriminated_union", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_discriminated_union.py", "snippet": "CORE_SCHEMA_METADATA_DISCRIMINATOR_PLACEHOLDER_KEY = 'pydantic.internal.union_discriminator'\nclass MissingDefinitionForUnionRef(Exception):\nclass _ApplyInferredDiscriminator:\n def __init__(self, ref: str) -> None:\ndef set_discriminator(schema: CoreSchema, discriminator: Any) -> None:\ndef apply_discriminators(schema: core_schema.CoreSchema) -> core_schema.CoreSchema:\n def inner(s: core_schema.CoreSchema, recurse: _core_utils.Recurse) -> core_schema.CoreSchema:\ndef apply_discriminator(\n schema: core_schema.CoreSchema, discriminator: str, definitions: dict[str, core_schema.CoreSchema] | None = None\n) -> core_schema.CoreSchema:\n def __init__(self, discriminator: str, definitions: dict[str, core_schema.CoreSchema]):\n def apply(self, schema: core_schema.CoreSchema) -> core_schema.CoreSchema:\n def _apply_to_root(self, schema: core_schema.CoreSchema) -> core_schema.CoreSchema:\n def _handle_choice(self, choice: core_schema.CoreSchema) -> None:\n def _is_discriminator_shared(self, choice: core_schema.TaggedUnionSchema) -> bool:\n def _infer_discriminator_values_for_choice( # noqa C901\n self, choice: core_schema.CoreSchema, source_name: str | None\n ) -> list[str | int]:\n def _infer_discriminator_values_for_typed_dict_choice(\n self, choice: core_schema.TypedDictSchema, source_name: str | None = None\n ) -> list[str | int]:\n def _infer_discriminator_values_for_model_choice(\n self, choice: core_schema.ModelFieldsSchema, source_name: str | None = None\n ) -> list[str | int]:\n def _infer_discriminator_values_for_dataclass_choice(\n self, choice: core_schema.DataclassArgsSchema, source_name: str | None = None\n ) -> list[str | int]:\n def _infer_discriminator_values_for_field(self, field: CoreSchemaField, source: str) -> list[str | int]:\n def _infer_discriminator_values_for_inner_schema(\n self, schema: core_schema.CoreSchema, source: str\n ) -> list[str | int]:\n def _set_unique_choice_for_values(self, choice: core_schema.CoreSchema, values: Sequence[str | int]) -> None:" }, { "identifier": "_generate_schema", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_generate_schema.py", "snippet": "def _generate_schema(self, obj: Any) -> core_schema.CoreSchema:\n \"\"\"Recursively generate a pydantic-core schema for any supported python type.\"\"\"\n has_invalid_schema = self._has_invalid_schema\n self._has_invalid_schema = False\n needs_apply_discriminated_union = self._needs_apply_discriminated_union\n self._needs_apply_discriminated_union = False\n schema = self._post_process_generated_schema(self._generate_schema_inner(obj))\n self._has_invalid_schema = self._has_invalid_schema or has_invalid_schema\n self._needs_apply_discriminated_union = self._needs_apply_discriminated_union or needs_apply_discriminated_union\n return schema" }, { "identifier": "_typing_extra", "path": 
"backend/venv/lib/python3.10/site-packages/pydantic/_internal/_typing_extra.py", "snippet": " def origin_is_union(tp: type[Any] | None) -> bool:\n def origin_is_union(tp: type[Any] | None) -> bool:\ndef is_none_type(type_: Any) -> bool:\ndef is_callable_type(type_: type[Any]) -> bool:\ndef is_literal_type(type_: type[Any]) -> bool:\ndef literal_values(type_: type[Any]) -> tuple[Any, ...]:\ndef all_literal_values(type_: type[Any]) -> list[Any]:\ndef is_annotated(ann_type: Any) -> bool:\ndef is_namedtuple(type_: type[Any]) -> bool:\ndef is_new_type(type_: type[Any]) -> bool:\ndef _check_classvar(v: type[Any] | None) -> bool:\ndef is_classvar(ann_type: type[Any]) -> bool:\ndef _check_finalvar(v: type[Any] | None) -> bool:\ndef is_finalvar(ann_type: Any) -> bool:\ndef parent_frame_namespace(*, parent_depth: int = 2) -> dict[str, Any] | None:\ndef add_module_globals(obj: Any, globalns: dict[str, Any] | None = None) -> dict[str, Any]:\ndef get_cls_types_namespace(cls: type[Any], parent_namespace: dict[str, Any] | None = None) -> dict[str, Any]:\ndef get_cls_type_hints_lenient(obj: Any, globalns: dict[str, Any] | None = None) -> dict[str, Any]:\ndef eval_type_lenient(value: Any, globalns: dict[str, Any] | None, localns: dict[str, Any] | None) -> Any:\ndef get_function_type_hints(\n function: Callable[..., Any], *, include_keys: set[str] | None = None, types_namespace: dict[str, Any] | None = None\n) -> dict[str, Any]:\n def _make_forward_ref(\n arg: Any,\n is_argument: bool = True,\n *,\n is_class: bool = False,\n ) -> typing.ForwardRef:\n def get_type_hints( # noqa: C901\n obj: Any,\n globalns: dict[str, Any] | None = None,\n localns: dict[str, Any] | None = None,\n include_extras: bool = False,\n ) -> dict[str, Any]: # pragma: no cover\n def evaluate_fwd_ref(\n ref: ForwardRef, globalns: dict[str, Any] | None = None, localns: dict[str, Any] | None = None\n ) -> Any:\n def evaluate_fwd_ref(\n ref: ForwardRef, globalns: dict[str, Any] | None = None, localns: dict[str, Any] | None = None\n ) -> Any:\ndef is_dataclass(_cls: type[Any]) -> TypeGuard[type[StandardDataclass]]:\ndef origin_is_type_alias_type(origin: Any) -> TypeGuard[TypeAliasType]:\nLITERAL_TYPES: set[Any] = {Literal}\nNONE_TYPES: tuple[Any, ...] = (None, NoneType, *(tp[None] for tp in LITERAL_TYPES))" }, { "identifier": "ConfigDict", "path": "backend/venv/lib/python3.10/site-packages/pydantic/config.py", "snippet": "class ConfigDict(TypedDict, total=False):\n \"\"\"A TypedDict for configuring Pydantic behaviour.\"\"\"\n\n title: str | None\n \"\"\"The title for the generated JSON schema, defaults to the model's name\"\"\"\n\n str_to_lower: bool\n \"\"\"Whether to convert all characters to lowercase for str types. Defaults to `False`.\"\"\"\n\n str_to_upper: bool\n \"\"\"Whether to convert all characters to uppercase for str types. Defaults to `False`.\"\"\"\n str_strip_whitespace: bool\n \"\"\"Whether to strip leading and trailing whitespace for str types.\"\"\"\n\n str_min_length: int\n \"\"\"The minimum length for str types. Defaults to `None`.\"\"\"\n\n str_max_length: int | None\n \"\"\"The maximum length for str types. Defaults to `None`.\"\"\"\n\n extra: ExtraValues | None\n \"\"\"\n Whether to ignore, allow, or forbid extra attributes during model initialization. 
Defaults to `'ignore'`.\n\n You can configure how pydantic handles the attributes that are not defined in the model:\n\n * `allow` - Allow any extra attributes.\n * `forbid` - Forbid any extra attributes.\n * `ignore` - Ignore any extra attributes.\n\n ```py\n from pydantic import BaseModel, ConfigDict\n\n\n class User(BaseModel):\n model_config = ConfigDict(extra='ignore') # (1)!\n\n name: str\n\n\n user = User(name='John Doe', age=20) # (2)!\n print(user)\n #> name='John Doe'\n ```\n\n 1. This is the default behaviour.\n 2. The `age` argument is ignored.\n\n Instead, with `extra='allow'`, the `age` argument is included:\n\n ```py\n from pydantic import BaseModel, ConfigDict\n\n\n class User(BaseModel):\n model_config = ConfigDict(extra='allow')\n\n name: str\n\n\n user = User(name='John Doe', age=20) # (1)!\n print(user)\n #> name='John Doe' age=20\n ```\n\n 1. The `age` argument is included.\n\n With `extra='forbid'`, an error is raised:\n\n ```py\n from pydantic import BaseModel, ConfigDict, ValidationError\n\n\n class User(BaseModel):\n model_config = ConfigDict(extra='forbid')\n\n name: str\n\n\n try:\n User(name='John Doe', age=20)\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for User\n age\n Extra inputs are not permitted [type=extra_forbidden, input_value=20, input_type=int]\n '''\n ```\n \"\"\"\n\n frozen: bool\n \"\"\"\n Whether or not models are faux-immutable, i.e. whether `__setattr__` is allowed, and also generates\n a `__hash__()` method for the model. This makes instances of the model potentially hashable if all the\n attributes are hashable. Defaults to `False`.\n\n Note:\n On V1, this setting was called `allow_mutation`, and was `True` by default.\n \"\"\"\n\n populate_by_name: bool\n \"\"\"\n Whether an aliased field may be populated by its name as given by the model\n attribute, as well as the alias. Defaults to `False`.\n\n Note:\n The name of this configuration setting was changed in **v2.0** from\n `allow_population_by_alias` to `populate_by_name`.\n\n ```py\n from pydantic import BaseModel, ConfigDict, Field\n\n\n class User(BaseModel):\n model_config = ConfigDict(populate_by_name=True)\n\n name: str = Field(alias='full_name') # (1)!\n age: int\n\n\n user = User(full_name='John Doe', age=20) # (2)!\n print(user)\n #> name='John Doe' age=20\n user = User(name='John Doe', age=20) # (3)!\n print(user)\n #> name='John Doe' age=20\n ```\n\n 1. The field `'name'` has an alias `'full_name'`.\n 2. The model is populated by the alias `'full_name'`.\n 3. The model is populated by the field name `'name'`.\n \"\"\"\n\n use_enum_values: bool\n \"\"\"\n Whether to populate models with the `value` property of enums, rather than the raw enum.\n This may be useful if you want to serialize `model.model_dump()` later. Defaults to `False`.\n \"\"\"\n\n validate_assignment: bool\n \"\"\"\n Whether to validate the data when the model is changed. Defaults to `False`.\n\n The default behavior of Pydantic is to validate the data when the model is created.\n\n In case the user changes the data after the model is created, the model is _not_ revalidated.\n\n ```py\n from pydantic import BaseModel\n\n class User(BaseModel):\n name: str\n\n user = User(name='John Doe') # (1)!\n print(user)\n #> name='John Doe'\n user.name = 123 # (1)!\n print(user)\n #> name=123\n ```\n\n 1. The validation happens only when the model is created.\n 2. 
The validation does not happen when the data is changed.\n\n In case you want to revalidate the model when the data is changed, you can use `validate_assignment=True`:\n\n ```py\n from pydantic import BaseModel, ValidationError\n\n class User(BaseModel, validate_assignment=True): # (1)!\n name: str\n\n user = User(name='John Doe') # (2)!\n print(user)\n #> name='John Doe'\n try:\n user.name = 123 # (3)!\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for User\n name\n Input should be a valid string [type=string_type, input_value=123, input_type=int]\n '''\n ```\n\n 1. You can either use class keyword arguments, or `model_config` to set `validate_assignment=True`.\n 2. The validation happens when the model is created.\n 3. The validation _also_ happens when the data is changed.\n \"\"\"\n\n arbitrary_types_allowed: bool\n \"\"\"\n Whether arbitrary types are allowed for field types. Defaults to `False`.\n\n ```py\n from pydantic import BaseModel, ConfigDict, ValidationError\n\n # This is not a pydantic model, it's an arbitrary class\n class Pet:\n def __init__(self, name: str):\n self.name = name\n\n class Model(BaseModel):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n pet: Pet\n owner: str\n\n pet = Pet(name='Hedwig')\n # A simple check of instance type is used to validate the data\n model = Model(owner='Harry', pet=pet)\n print(model)\n #> pet=<__main__.Pet object at 0x0123456789ab> owner='Harry'\n print(model.pet)\n #> <__main__.Pet object at 0x0123456789ab>\n print(model.pet.name)\n #> Hedwig\n print(type(model.pet))\n #> <class '__main__.Pet'>\n try:\n # If the value is not an instance of the type, it's invalid\n Model(owner='Harry', pet='Hedwig')\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Model\n pet\n Input should be an instance of Pet [type=is_instance_of, input_value='Hedwig', input_type=str]\n '''\n\n # Nothing in the instance of the arbitrary type is checked\n # Here name probably should have been a str, but it's not validated\n pet2 = Pet(name=42)\n model2 = Model(owner='Harry', pet=pet2)\n print(model2)\n #> pet=<__main__.Pet object at 0x0123456789ab> owner='Harry'\n print(model2.pet)\n #> <__main__.Pet object at 0x0123456789ab>\n print(model2.pet.name)\n #> 42\n print(type(model2.pet))\n #> <class '__main__.Pet'>\n ```\n \"\"\"\n\n from_attributes: bool\n \"\"\"\n Whether to build models and look up discriminators of tagged unions using python object attributes.\n \"\"\"\n\n loc_by_alias: bool\n \"\"\"Whether to use the actual key provided in the data (e.g. alias) for error `loc`s rather than the field's name. Defaults to `True`.\"\"\"\n\n alias_generator: Callable[[str], str] | None\n \"\"\"\n A callable that takes a field name and returns an alias for it.\n\n If data source field names do not match your code style (e. g. 
CamelCase fields),\n you can automatically generate aliases using `alias_generator`:\n\n ```py\n from pydantic import BaseModel, ConfigDict\n from pydantic.alias_generators import to_pascal\n\n class Voice(BaseModel):\n model_config = ConfigDict(alias_generator=to_pascal)\n\n name: str\n language_code: str\n\n voice = Voice(Name='Filiz', LanguageCode='tr-TR')\n print(voice.language_code)\n #> tr-TR\n print(voice.model_dump(by_alias=True))\n #> {'Name': 'Filiz', 'LanguageCode': 'tr-TR'}\n ```\n\n Note:\n Pydantic offers three built-in alias generators: [`to_pascal`][pydantic.alias_generators.to_pascal],\n [`to_camel`][pydantic.alias_generators.to_camel], and [`to_snake`][pydantic.alias_generators.to_snake].\n \"\"\"\n\n ignored_types: tuple[type, ...]\n \"\"\"A tuple of types that may occur as values of class attributes without annotations. This is\n typically used for custom descriptors (classes that behave like `property`). If an attribute is set on a\n class without an annotation and has a type that is not in this tuple (or otherwise recognized by\n _pydantic_), an error will be raised. Defaults to `()`.\n \"\"\"\n\n allow_inf_nan: bool\n \"\"\"Whether to allow infinity (`+inf` an `-inf`) and NaN values to float fields. Defaults to `True`.\"\"\"\n\n json_schema_extra: dict[str, object] | JsonSchemaExtraCallable | None\n \"\"\"A dict or callable to provide extra JSON schema properties. Defaults to `None`.\"\"\"\n\n json_encoders: dict[type[object], JsonEncoder] | None\n \"\"\"\n A `dict` of custom JSON encoders for specific types. Defaults to `None`.\n\n !!! warning \"Deprecated\"\n This config option is a carryover from v1.\n We originally planned to remove it in v2 but didn't have a 1:1 replacement so we are keeping it for now.\n It is still deprecated and will likely be removed in the future.\n \"\"\"\n\n # new in V2\n strict: bool\n \"\"\"\n _(new in V2)_ If `True`, strict validation is applied to all fields on the model.\n\n By default, Pydantic attempts to coerce values to the correct type, when possible.\n\n There are situations in which you may want to disable this behavior, and instead raise an error if a value's type\n does not match the field's type annotation.\n\n To configure strict mode for all fields on a model, you can set `strict=True` on the model.\n\n ```py\n from pydantic import BaseModel, ConfigDict\n\n class Model(BaseModel):\n model_config = ConfigDict(strict=True)\n\n name: str\n age: int\n ```\n\n See [Strict Mode](../concepts/strict_mode.md) for more details.\n\n See the [Conversion Table](../concepts/conversion_table.md) for more details on how Pydantic converts data in both\n strict and lax modes.\n \"\"\"\n # whether instances of models and dataclasses (including subclass instances) should re-validate, default 'never'\n revalidate_instances: Literal['always', 'never', 'subclass-instances']\n \"\"\"\n When and how to revalidate models and dataclasses during validation. Accepts the string\n values of `'never'`, `'always'` and `'subclass-instances'`. 
Defaults to `'never'`.\n\n - `'never'` will not revalidate models and dataclasses during validation\n - `'always'` will revalidate models and dataclasses during validation\n - `'subclass-instances'` will revalidate models and dataclasses during validation if the instance is a\n subclass of the model or dataclass\n\n By default, model and dataclass instances are not revalidated during validation.\n\n ```py\n from typing import List\n\n from pydantic import BaseModel\n\n class User(BaseModel, revalidate_instances='never'): # (1)!\n hobbies: List[str]\n\n class SubUser(User):\n sins: List[str]\n\n class Transaction(BaseModel):\n user: User\n\n my_user = User(hobbies=['reading'])\n t = Transaction(user=my_user)\n print(t)\n #> user=User(hobbies=['reading'])\n\n my_user.hobbies = [1] # (2)!\n t = Transaction(user=my_user) # (3)!\n print(t)\n #> user=User(hobbies=[1])\n\n my_sub_user = SubUser(hobbies=['scuba diving'], sins=['lying'])\n t = Transaction(user=my_sub_user)\n print(t)\n #> user=SubUser(hobbies=['scuba diving'], sins=['lying'])\n ```\n\n 1. `revalidate_instances` is set to `'never'` by **default.\n 2. The assignment is not validated, unless you set `validate_assignment` to `True` in the model's config.\n 3. Since `revalidate_instances` is set to `never`, this is not revalidated.\n\n If you want to revalidate instances during validation, you can set `revalidate_instances` to `'always'`\n in the model's config.\n\n ```py\n from typing import List\n\n from pydantic import BaseModel, ValidationError\n\n class User(BaseModel, revalidate_instances='always'): # (1)!\n hobbies: List[str]\n\n class SubUser(User):\n sins: List[str]\n\n class Transaction(BaseModel):\n user: User\n\n my_user = User(hobbies=['reading'])\n t = Transaction(user=my_user)\n print(t)\n #> user=User(hobbies=['reading'])\n\n my_user.hobbies = [1]\n try:\n t = Transaction(user=my_user) # (2)!\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Transaction\n user.hobbies.0\n Input should be a valid string [type=string_type, input_value=1, input_type=int]\n '''\n\n my_sub_user = SubUser(hobbies=['scuba diving'], sins=['lying'])\n t = Transaction(user=my_sub_user)\n print(t) # (3)!\n #> user=User(hobbies=['scuba diving'])\n ```\n\n 1. `revalidate_instances` is set to `'always'`.\n 2. The model is revalidated, since `revalidate_instances` is set to `'always'`.\n 3. Using `'never'` we would have gotten `user=SubUser(hobbies=['scuba diving'], sins=['lying'])`.\n\n It's also possible to set `revalidate_instances` to `'subclass-instances'` to only revalidate instances\n of subclasses of the model.\n\n ```py\n from typing import List\n\n from pydantic import BaseModel\n\n class User(BaseModel, revalidate_instances='subclass-instances'): # (1)!\n hobbies: List[str]\n\n class SubUser(User):\n sins: List[str]\n\n class Transaction(BaseModel):\n user: User\n\n my_user = User(hobbies=['reading'])\n t = Transaction(user=my_user)\n print(t)\n #> user=User(hobbies=['reading'])\n\n my_user.hobbies = [1]\n t = Transaction(user=my_user) # (2)!\n print(t)\n #> user=User(hobbies=[1])\n\n my_sub_user = SubUser(hobbies=['scuba diving'], sins=['lying'])\n t = Transaction(user=my_sub_user)\n print(t) # (3)!\n #> user=User(hobbies=['scuba diving'])\n ```\n\n 1. `revalidate_instances` is set to `'subclass-instances'`.\n 2. This is not revalidated, since `my_user` is not a subclass of `User`.\n 3. 
Using `'never'` we would have gotten `user=SubUser(hobbies=['scuba diving'], sins=['lying'])`.\n \"\"\"\n\n ser_json_timedelta: Literal['iso8601', 'float']\n \"\"\"\n The format of JSON serialized timedeltas. Accepts the string values of `'iso8601'` and\n `'float'`. Defaults to `'iso8601'`.\n\n - `'iso8601'` will serialize timedeltas to ISO 8601 durations.\n - `'float'` will serialize timedeltas to the total number of seconds.\n \"\"\"\n\n ser_json_bytes: Literal['utf8', 'base64']\n \"\"\"\n The encoding of JSON serialized bytes. Accepts the string values of `'utf8'` and `'base64'`.\n Defaults to `'utf8'`.\n\n - `'utf8'` will serialize bytes to UTF-8 strings.\n - `'base64'` will serialize bytes to URL safe base64 strings.\n \"\"\"\n\n # whether to validate default values during validation, default False\n validate_default: bool\n \"\"\"Whether to validate default values during validation. Defaults to `False`.\"\"\"\n\n validate_return: bool\n \"\"\"whether to validate the return value from call validators. Defaults to `False`.\"\"\"\n\n protected_namespaces: tuple[str, ...]\n \"\"\"\n A `tuple` of strings that prevent model to have field which conflict with them.\n Defaults to `('model_', )`).\n\n Pydantic prevents collisions between model attributes and `BaseModel`'s own methods by\n namespacing them with the prefix `model_`.\n\n ```py\n import warnings\n\n from pydantic import BaseModel\n\n warnings.filterwarnings('error') # Raise warnings as errors\n\n try:\n\n class Model(BaseModel):\n model_prefixed_field: str\n\n except UserWarning as e:\n print(e)\n '''\n Field \"model_prefixed_field\" has conflict with protected namespace \"model_\".\n\n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n '''\n ```\n\n You can customize this behavior using the `protected_namespaces` setting:\n\n ```py\n import warnings\n\n from pydantic import BaseModel, ConfigDict\n\n warnings.filterwarnings('error') # Raise warnings as errors\n\n try:\n\n class Model(BaseModel):\n model_prefixed_field: str\n also_protect_field: str\n\n model_config = ConfigDict(\n protected_namespaces=('protect_me_', 'also_protect_')\n )\n\n except UserWarning as e:\n print(e)\n '''\n Field \"also_protect_field\" has conflict with protected namespace \"also_protect_\".\n\n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ('protect_me_',)`.\n '''\n ```\n\n While Pydantic will only emit a warning when an item is in a protected namespace but does not actually have a collision,\n an error _is_ raised if there is an actual collision with an existing attribute:\n\n ```py\n from pydantic import BaseModel\n\n try:\n\n class Model(BaseModel):\n model_validate: str\n\n except NameError as e:\n print(e)\n '''\n Field \"model_validate\" conflicts with member <bound method BaseModel.model_validate of <class 'pydantic.main.BaseModel'>> of protected namespace \"model_\".\n '''\n ```\n \"\"\"\n\n hide_input_in_errors: bool\n \"\"\"\n Whether to hide inputs when printing errors. 
Defaults to `False`.\n\n Pydantic shows the input value and type when it raises `ValidationError` during the validation.\n\n ```py\n from pydantic import BaseModel, ValidationError\n\n class Model(BaseModel):\n a: str\n\n try:\n Model(a=123)\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Model\n a\n Input should be a valid string [type=string_type, input_value=123, input_type=int]\n '''\n ```\n\n You can hide the input value and type by setting the `hide_input_in_errors` config to `True`.\n\n ```py\n from pydantic import BaseModel, ConfigDict, ValidationError\n\n class Model(BaseModel):\n a: str\n model_config = ConfigDict(hide_input_in_errors=True)\n\n try:\n Model(a=123)\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Model\n a\n Input should be a valid string [type=string_type]\n '''\n ```\n \"\"\"\n\n defer_build: bool\n \"\"\"\n Whether to defer model validator and serializer construction until the first model validation.\n\n This can be useful to avoid the overhead of building models which are only\n used nested within other models, or when you want to manually define type namespace via\n [`Model.model_rebuild(_types_namespace=...)`][pydantic.BaseModel.model_rebuild]. Defaults to False.\n \"\"\"\n\n plugin_settings: dict[str, object] | None\n \"\"\"A `dict` of settings for plugins. Defaults to `None`.\n\n See [Pydantic Plugins](../concepts/plugins.md) for details.\n \"\"\"\n\n schema_generator: type[_GenerateSchema] | None\n \"\"\"\n A custom core schema generator class to use when generating JSON schemas.\n Useful if you want to change the way types are validated across an entire model/schema. Defaults to `None`.\n\n The `GenerateSchema` interface is subject to change, currently only the `string_schema` method is public.\n\n See [#6737](https://github.com/pydantic/pydantic/pull/6737) for details.\n \"\"\"\n\n json_schema_serialization_defaults_required: bool\n \"\"\"\n Whether fields with default values should be marked as required in the serialization schema. Defaults to `False`.\n\n This ensures that the serialization schema will reflect the fact a field with a default will always be present\n when serializing the model, even though it is not required for validation.\n\n However, there are scenarios where this may be undesirable — in particular, if you want to share the schema\n between validation and serialization, and don't mind fields with defaults being marked as not required during\n serialization. See [#7209](https://github.com/pydantic/pydantic/issues/7209) for more details.\n\n ```py\n from pydantic import BaseModel, ConfigDict\n\n class Model(BaseModel):\n a: str = 'a'\n\n model_config = ConfigDict(json_schema_serialization_defaults_required=True)\n\n print(Model.model_json_schema(mode='validation'))\n '''\n {\n 'properties': {'a': {'default': 'a', 'title': 'A', 'type': 'string'}},\n 'title': 'Model',\n 'type': 'object',\n }\n '''\n print(Model.model_json_schema(mode='serialization'))\n '''\n {\n 'properties': {'a': {'default': 'a', 'title': 'A', 'type': 'string'}},\n 'required': ['a'],\n 'title': 'Model',\n 'type': 'object',\n }\n '''\n ```\n \"\"\"\n\n json_schema_mode_override: Literal['validation', 'serialization', None]\n \"\"\"\n If not `None`, the specified mode will be used to generate the JSON schema regardless of what `mode` was passed to\n the function call. 
Defaults to `None`.\n\n This provides a way to force the JSON schema generation to reflect a specific mode, e.g., to always use the\n validation schema.\n\n It can be useful when using frameworks (such as FastAPI) that may generate different schemas for validation\n and serialization that must both be referenced from the same schema; when this happens, we automatically append\n `-Input` to the definition reference for the validation schema and `-Output` to the definition reference for the\n serialization schema. By specifying a `json_schema_mode_override` though, this prevents the conflict between\n the validation and serialization schemas (since both will use the specified schema), and so prevents the suffixes\n from being added to the definition references.\n\n ```py\n from pydantic import BaseModel, ConfigDict, Json\n\n class Model(BaseModel):\n a: Json[int] # requires a string to validate, but will dump an int\n\n print(Model.model_json_schema(mode='serialization'))\n '''\n {\n 'properties': {'a': {'title': 'A', 'type': 'integer'}},\n 'required': ['a'],\n 'title': 'Model',\n 'type': 'object',\n }\n '''\n\n class ForceInputModel(Model):\n # the following ensures that even with mode='serialization', we\n # will get the schema that would be generated for validation.\n model_config = ConfigDict(json_schema_mode_override='validation')\n\n print(ForceInputModel.model_json_schema(mode='serialization'))\n '''\n {\n 'properties': {\n 'a': {\n 'contentMediaType': 'application/json',\n 'contentSchema': {'type': 'integer'},\n 'title': 'A',\n 'type': 'string',\n }\n },\n 'required': ['a'],\n 'title': 'ForceInputModel',\n 'type': 'object',\n }\n '''\n ```\n \"\"\"\n\n coerce_numbers_to_str: bool\n \"\"\"\n If `True`, enables automatic coercion of any `Number` type to `str` in \"lax\" (non-strict) mode. 
Defaults to `False`.\n\n Pydantic doesn't allow number types (`int`, `float`, `Decimal`) to be coerced as type `str` by default.\n\n ```py\n from decimal import Decimal\n\n from pydantic import BaseModel, ConfigDict, ValidationError\n\n class Model(BaseModel):\n value: str\n\n try:\n print(Model(value=42))\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Model\n value\n Input should be a valid string [type=string_type, input_value=42, input_type=int]\n '''\n\n class Model(BaseModel):\n model_config = ConfigDict(coerce_numbers_to_str=True)\n\n value: str\n\n repr(Model(value=42).value)\n #> \"42\"\n repr(Model(value=42.13).value)\n #> \"42.13\"\n repr(Model(value=Decimal('42.13')).value)\n #> \"42.13\"\n ```\n \"\"\"" }, { "identifier": "DEFAULT_REF_TEMPLATE", "path": "backend/venv/lib/python3.10/site-packages/pydantic/json_schema.py", "snippet": "_MODE_TITLE_MAPPING: dict[JsonSchemaMode, str] = {'validation': 'Input', 'serialization': 'Output'}\nDEFAULT_REF_TEMPLATE = '#/$defs/{model}'\ndef update_json_schema(schema: JsonSchemaValue, updates: dict[str, Any]) -> JsonSchemaValue:\n def from_prioritized_choices(\n prioritized_choices: dict[DefsRef, list[DefsRef]],\n defs_to_json: dict[DefsRef, JsonRef],\n definitions: dict[DefsRef, JsonSchemaValue],\n ) -> _DefinitionsRemapping:\n def remap_defs_ref(self, ref: DefsRef) -> DefsRef:\n def remap_json_ref(self, ref: JsonRef) -> JsonRef:\n def remap_json_schema(self, schema: Any) -> Any:\n def __init__(self, by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE):\n def _config(self) -> _config.ConfigWrapper:\n def mode(self) -> JsonSchemaMode:\n def build_schema_type_to_method(\n self,\n ) -> dict[CoreSchemaOrFieldType, Callable[[CoreSchemaOrField], JsonSchemaValue]]:\n def generate_definitions(\n self, inputs: Sequence[tuple[JsonSchemaKeyT, JsonSchemaMode, core_schema.CoreSchema]]\n ) -> tuple[dict[tuple[JsonSchemaKeyT, JsonSchemaMode], JsonSchemaValue], dict[DefsRef, JsonSchemaValue]]:\n def generate(self, schema: CoreSchema, mode: JsonSchemaMode = 'validation') -> JsonSchemaValue:\n def generate_inner(self, schema: CoreSchemaOrField) -> JsonSchemaValue: # noqa: C901\n def populate_defs(core_schema: CoreSchema, json_schema: JsonSchemaValue) -> JsonSchemaValue:\n def convert_to_all_of(json_schema: JsonSchemaValue) -> JsonSchemaValue:\n def handler_func(schema_or_field: CoreSchemaOrField) -> JsonSchemaValue:\n def new_handler_func(\n schema_or_field: CoreSchemaOrField,\n current_handler: GetJsonSchemaHandler = current_handler,\n js_modify_function: GetJsonSchemaFunction = js_modify_function,\n ) -> JsonSchemaValue:\n def new_handler_func(\n schema_or_field: CoreSchemaOrField,\n current_handler: GetJsonSchemaHandler = current_handler,\n js_modify_function: GetJsonSchemaFunction = js_modify_function,\n ) -> JsonSchemaValue:\n def any_schema(self, schema: core_schema.AnySchema) -> JsonSchemaValue:\n def none_schema(self, schema: core_schema.NoneSchema) -> JsonSchemaValue:\n def bool_schema(self, schema: core_schema.BoolSchema) -> JsonSchemaValue:\n def int_schema(self, schema: core_schema.IntSchema) -> JsonSchemaValue:\n def float_schema(self, schema: core_schema.FloatSchema) -> JsonSchemaValue:\n def decimal_schema(self, schema: core_schema.DecimalSchema) -> JsonSchemaValue:\n def str_schema(self, schema: core_schema.StringSchema) -> JsonSchemaValue:\n def bytes_schema(self, schema: core_schema.BytesSchema) -> JsonSchemaValue:\n def date_schema(self, schema: core_schema.DateSchema) -> JsonSchemaValue:\n def 
time_schema(self, schema: core_schema.TimeSchema) -> JsonSchemaValue:\n def datetime_schema(self, schema: core_schema.DatetimeSchema) -> JsonSchemaValue:\n def timedelta_schema(self, schema: core_schema.TimedeltaSchema) -> JsonSchemaValue:\n def literal_schema(self, schema: core_schema.LiteralSchema) -> JsonSchemaValue:\n def is_instance_schema(self, schema: core_schema.IsInstanceSchema) -> JsonSchemaValue:\n def is_subclass_schema(self, schema: core_schema.IsSubclassSchema) -> JsonSchemaValue:\n def callable_schema(self, schema: core_schema.CallableSchema) -> JsonSchemaValue:\n def list_schema(self, schema: core_schema.ListSchema) -> JsonSchemaValue:\n def tuple_positional_schema(self, schema: core_schema.TuplePositionalSchema) -> JsonSchemaValue:\n def tuple_variable_schema(self, schema: core_schema.TupleVariableSchema) -> JsonSchemaValue:\n def set_schema(self, schema: core_schema.SetSchema) -> JsonSchemaValue:\n def frozenset_schema(self, schema: core_schema.FrozenSetSchema) -> JsonSchemaValue:\n def _common_set_schema(self, schema: core_schema.SetSchema | core_schema.FrozenSetSchema) -> JsonSchemaValue:\n def generator_schema(self, schema: core_schema.GeneratorSchema) -> JsonSchemaValue:\n def dict_schema(self, schema: core_schema.DictSchema) -> JsonSchemaValue:\n def _function_schema(\n self,\n schema: _core_utils.AnyFunctionSchema,\n ) -> JsonSchemaValue:\n def function_before_schema(self, schema: core_schema.BeforeValidatorFunctionSchema) -> JsonSchemaValue:\n def function_after_schema(self, schema: core_schema.AfterValidatorFunctionSchema) -> JsonSchemaValue:\n def function_plain_schema(self, schema: core_schema.PlainValidatorFunctionSchema) -> JsonSchemaValue:\n def function_wrap_schema(self, schema: core_schema.WrapValidatorFunctionSchema) -> JsonSchemaValue:\n def default_schema(self, schema: core_schema.WithDefaultSchema) -> JsonSchemaValue:\n def nullable_schema(self, schema: core_schema.NullableSchema) -> JsonSchemaValue:\n def union_schema(self, schema: core_schema.UnionSchema) -> JsonSchemaValue:\n def tagged_union_schema(self, schema: core_schema.TaggedUnionSchema) -> JsonSchemaValue:\n def _extract_discriminator(\n self, schema: core_schema.TaggedUnionSchema, one_of_choices: list[_JsonDict]\n ) -> str | None:\n def chain_schema(self, schema: core_schema.ChainSchema) -> JsonSchemaValue:\n def lax_or_strict_schema(self, schema: core_schema.LaxOrStrictSchema) -> JsonSchemaValue:\n def json_or_python_schema(self, schema: core_schema.JsonOrPythonSchema) -> JsonSchemaValue:\n def typed_dict_schema(self, schema: core_schema.TypedDictSchema) -> JsonSchemaValue:\n def _name_required_computed_fields(\n computed_fields: list[ComputedField],\n ) -> list[tuple[str, bool, core_schema.ComputedField]]:\n def _named_required_fields_schema(\n self, named_required_fields: Sequence[tuple[str, bool, CoreSchemaField]]\n ) -> JsonSchemaValue:\n def _get_alias_name(self, field: CoreSchemaField, name: str) -> str:\n def typed_dict_field_schema(self, schema: core_schema.TypedDictField) -> JsonSchemaValue:\n def dataclass_field_schema(self, schema: core_schema.DataclassField) -> JsonSchemaValue:\n def model_field_schema(self, schema: core_schema.ModelField) -> JsonSchemaValue:\n def computed_field_schema(self, schema: core_schema.ComputedField) -> JsonSchemaValue:\n def model_schema(self, schema: core_schema.ModelSchema) -> JsonSchemaValue:\n def _update_class_schema(\n self,\n json_schema: JsonSchemaValue,\n title: str | None,\n extra: Literal['allow', 'ignore', 'forbid'] | None,\n cls: 
type[Any],\n json_schema_extra: dict[str, Any] | JsonSchemaExtraCallable | None,\n ) -> JsonSchemaValue:\n def resolve_schema_to_update(self, json_schema: JsonSchemaValue) -> JsonSchemaValue:\n def model_fields_schema(self, schema: core_schema.ModelFieldsSchema) -> JsonSchemaValue:\n def field_is_present(self, field: CoreSchemaField) -> bool:\n def field_is_required(\n self,\n field: core_schema.ModelField | core_schema.DataclassField | core_schema.TypedDictField,\n total: bool,\n ) -> bool:\n def dataclass_args_schema(self, schema: core_schema.DataclassArgsSchema) -> JsonSchemaValue:\n def dataclass_schema(self, schema: core_schema.DataclassSchema) -> JsonSchemaValue:\n def arguments_schema(self, schema: core_schema.ArgumentsSchema) -> JsonSchemaValue:\n def kw_arguments_schema(\n self, arguments: list[core_schema.ArgumentsParameter], var_kwargs_schema: CoreSchema | None\n ) -> JsonSchemaValue:\n def p_arguments_schema(\n self, arguments: list[core_schema.ArgumentsParameter], var_args_schema: CoreSchema | None\n ) -> JsonSchemaValue:\n def get_argument_name(self, argument: core_schema.ArgumentsParameter) -> str:\n def call_schema(self, schema: core_schema.CallSchema) -> JsonSchemaValue:\n def custom_error_schema(self, schema: core_schema.CustomErrorSchema) -> JsonSchemaValue:\n def json_schema(self, schema: core_schema.JsonSchema) -> JsonSchemaValue:\n def url_schema(self, schema: core_schema.UrlSchema) -> JsonSchemaValue:\n def multi_host_url_schema(self, schema: core_schema.MultiHostUrlSchema) -> JsonSchemaValue:\n def uuid_schema(self, schema: core_schema.UuidSchema) -> JsonSchemaValue:\n def definitions_schema(self, schema: core_schema.DefinitionsSchema) -> JsonSchemaValue:\n def definition_ref_schema(self, schema: core_schema.DefinitionReferenceSchema) -> JsonSchemaValue:\n def ser_schema(\n self, schema: core_schema.SerSchema | core_schema.IncExSeqSerSchema | core_schema.IncExDictSerSchema\n ) -> JsonSchemaValue | None:\n def get_title_from_name(self, name: str) -> str:\n def field_title_should_be_set(self, schema: CoreSchemaOrField) -> bool:\n def normalize_name(self, name: str) -> str:\n def get_defs_ref(self, core_mode_ref: CoreModeRef) -> DefsRef:\n def get_cache_defs_ref_schema(self, core_ref: CoreRef) -> tuple[DefsRef, JsonSchemaValue]:\n def handle_ref_overrides(self, json_schema: JsonSchemaValue) -> JsonSchemaValue:\n def get_schema_from_definitions(self, json_ref: JsonRef) -> JsonSchemaValue | None:\n def encode_default(self, dft: Any) -> Any:\n def update_with_validations(\n self, json_schema: JsonSchemaValue, core_schema: CoreSchema, mapping: dict[str, str]\n ) -> None:\n def get_flattened_anyof(self, schemas: list[JsonSchemaValue]) -> JsonSchemaValue:\n def get_json_ref_counts(self, json_schema: JsonSchemaValue) -> dict[JsonRef, int]:\n def _add_json_refs(schema: Any) -> None:\n def handle_invalid_for_json_schema(self, schema: CoreSchemaOrField, error_info: str) -> JsonSchemaValue:\n def emit_warning(self, kind: JsonSchemaWarningKind, detail: str) -> None:\n def render_warning_message(self, kind: JsonSchemaWarningKind, detail: str) -> str | None:\n def _build_definitions_remapping(self) -> _DefinitionsRemapping:\n def _garbage_collect_definitions(self, schema: JsonSchemaValue) -> None:\ndef model_json_schema(\n cls: type[BaseModel] | type[PydanticDataclass],\n by_alias: bool = True,\n ref_template: str = DEFAULT_REF_TEMPLATE,\n schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema,\n mode: JsonSchemaMode = 'validation',\n) -> dict[str, Any]:\ndef 
models_json_schema(\n models: Sequence[tuple[type[BaseModel] | type[PydanticDataclass], JsonSchemaMode]],\n *,\n by_alias: bool = True,\n title: str | None = None,\n description: str | None = None,\n ref_template: str = DEFAULT_REF_TEMPLATE,\n schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema,\n) -> tuple[dict[tuple[type[BaseModel] | type[PydanticDataclass], JsonSchemaMode], JsonSchemaValue], JsonSchemaValue]:\ndef _deduplicate_schemas(schemas: Iterable[_JsonDict]) -> list[_JsonDict]:\ndef _make_json_hashable(value: _Json) -> _HashableJson:\ndef _sort_json_schema(value: JsonSchemaValue, parent_key: str | None = None) -> JsonSchemaValue:\n def __get_pydantic_json_schema__(\n self, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler\n ) -> JsonSchemaValue:\n def __hash__(self) -> int:\n def __get_pydantic_json_schema__(\n self, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler\n ) -> JsonSchemaValue:\n def __hash__(self) -> int:\ndef _get_all_json_refs(item: Any) -> set[JsonRef]:\n def __class_getitem__(cls, item: AnyType) -> AnyType:\n def __get_pydantic_json_schema__(\n self, core_schema: CoreSchema, handler: GetJsonSchemaHandler\n ) -> JsonSchemaValue:\n def __hash__(self) -> int:\ndef _get_typed_dict_config(schema: core_schema.TypedDictSchema) -> ConfigDict:\nclass PydanticJsonSchemaWarning(UserWarning):\nclass _DefinitionsRemapping:\nclass GenerateJsonSchema:\n class ValidationsMapping:\nclass WithJsonSchema:\nclass Examples:\n class SkipJsonSchema:" }, { "identifier": "create_schema_validator", "path": "backend/venv/lib/python3.10/site-packages/pydantic/plugin/_schema_validator.py", "snippet": "def create_schema_validator(\n schema: CoreSchema, config: CoreConfig | None = None, plugin_settings: dict[str, Any] | None = None\n) -> SchemaValidator:\n \"\"\"Create a `SchemaValidator` or `PluggableSchemaValidator` if plugins are installed.\n\n Returns:\n If plugins are installed then return `PluggableSchemaValidator`, otherwise return `SchemaValidator`.\n \"\"\"\n from ._loader import get_plugins\n\n plugins = get_plugins()\n if plugins:\n return PluggableSchemaValidator(schema, config, plugins, plugin_settings or {}) # type: ignore\n else:\n return SchemaValidator(schema, config)" } ]
import sys
from dataclasses import is_dataclass
from typing import TYPE_CHECKING, Any, Dict, Generic, Iterable, Set, TypeVar, Union, overload

from pydantic_core import CoreSchema, SchemaSerializer, SchemaValidator, Some
from typing_extensions import Literal, is_typeddict

from pydantic.errors import PydanticUserError
from pydantic.main import BaseModel

from ._internal import _config, _core_utils, _discriminated_union, _generate_schema, _typing_extra
from .config import ConfigDict
from .json_schema import (
    DEFAULT_REF_TEMPLATE,
    GenerateJsonSchema,
    JsonSchemaKeyT,
    JsonSchemaMode,
    JsonSchemaValue,
)
from .plugin._schema_validator import create_schema_validator
14,860
```py from typing import List from pydantic import BaseModel, TypeAdapter class Item(BaseModel): id: int name: str # `item_data` could come from an API call, eg., via something like: # item_data = requests.get('https://my-api.com/items').json() item_data = [{'id': 1, 'name': 'My Item'}] items = TypeAdapter(List[Item]).validate_python(item_data) print(items) #> [Item(id=1, name='My Item')] ``` [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] is capable of parsing data into any of the types Pydantic can handle as fields of a [`BaseModel`][pydantic.main.BaseModel]. """ # noqa: D212 from __future__ import annotations as _annotations T = TypeVar('T') if TYPE_CHECKING: # should be `set[int] | set[str] | dict[int, IncEx] | dict[str, IncEx] | None`, but mypy can't cope IncEx = Union[Set[int], Set[str], Dict[int, Any], Dict[str, Any]] def _get_schema(type_: Any, config_wrapper: _config.ConfigWrapper, parent_depth: int) -> CoreSchema: """`BaseModel` uses its own `__module__` to find out where it was defined and then look for symbols to resolve forward references in those globals. On the other hand this function can be called with arbitrary objects, including type aliases where `__module__` (always `typing.py`) is not useful. So instead we look at the globals in our parent stack frame. This works for the case where this function is called in a module that has the target of forward references in its scope, but does not work for more complex cases. For example, take the following: a.py ```python from typing import Dict, List IntList = List[int] OuterDict = Dict[str, 'IntList'] ``` b.py ```python test="skip" from a import OuterDict from pydantic import TypeAdapter IntList = int # replaces the symbol the forward reference is looking for v = TypeAdapter(OuterDict) v({'x': 1}) # should fail but doesn't ``` If OuterDict were a `BaseModel`, this would work because it would resolve the forward reference within the `a.py` namespace. But `TypeAdapter(OuterDict)` can't know what module OuterDict came from. In other words, the assumption that _all_ forward references exist in the module we are being called from is not technically always true. Although most of the time it is and it works fine for recursive models and such, `BaseModel`'s behavior isn't perfect either and _can_ break in similar ways, so there is no right or wrong between the two. But at the very least this behavior is _subtly_ different from `BaseModel`'s. """ local_ns = _typing_extra.parent_frame_namespace(parent_depth=parent_depth) global_ns = sys._getframe(max(parent_depth - 1, 1)).f_globals.copy() global_ns.update(local_ns or {}) gen = _generate_schema.GenerateSchema(config_wrapper, types_namespace=global_ns, typevars_map={}) schema = gen.generate_schema(type_) schema = gen.collect_definitions(schema) return schema def _getattr_no_parents(obj: Any, attribute: str) -> Any: """Returns the attribute value without attempting to look up attributes from parent types.""" if hasattr(obj, '__dict__'): try: return obj.__dict__[attribute] except KeyError: pass slots = getattr(obj, '__slots__', None) if slots is not None and attribute in slots: return getattr(obj, attribute) else: raise AttributeError(attribute) class TypeAdapter(Generic[T]): """Type adapters provide a flexible way to perform validation and serialization based on a Python type. A `TypeAdapter` instance exposes some of the functionality from `BaseModel` instance methods for types that do not have such methods (such as dataclasses, primitive types, and more). 
Note that `TypeAdapter` is not an actual type, so you cannot use it in type annotations. Attributes: core_schema: The core schema for the type. validator (SchemaValidator): The schema validator for the type. serializer: The schema serializer for the type. """ if TYPE_CHECKING: @overload
""" You may have types that are not `BaseModel`s that you want to validate data against. Or you may want to validate a `List[SomeModel]`, or dump it to JSON. For use cases like this, Pydantic provides [`TypeAdapter`][pydantic.type_adapter.TypeAdapter], which can be used for type validation, serialization, and JSON schema generation without creating a [`BaseModel`][pydantic.main.BaseModel]. A [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] instance exposes some of the functionality from [`BaseModel`][pydantic.main.BaseModel] instance methods for types that do not have such methods (such as dataclasses, primitive types, and more): ```py from typing import List from typing_extensions import TypedDict from pydantic import TypeAdapter, ValidationError class User(TypedDict): name: str id: int UserListValidator = TypeAdapter(List[User]) print(repr(UserListValidator.validate_python([{'name': 'Fred', 'id': '3'}]))) #> [{'name': 'Fred', 'id': 3}] try: UserListValidator.validate_python( [{'name': 'Fred', 'id': 'wrong', 'other': 'no'}] ) except ValidationError as e: print(e) ''' 1 validation error for list[typed-dict] 0.id Input should be a valid integer, unable to parse string as an integer [type=int_parsing, input_value='wrong', input_type=str] ''' ``` Note: Despite some overlap in use cases with [`RootModel`][pydantic.root_model.RootModel], [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] should not be used as a type annotation for specifying fields of a `BaseModel`, etc. ## Parsing data into a specified type [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] can be used to apply the parsing logic to populate Pydantic models in a more ad-hoc way. This function behaves similarly to [`BaseModel.model_validate`][pydantic.main.BaseModel.model_validate], but works with arbitrary Pydantic-compatible types. This is especially useful when you want to parse results into a type that is not a direct subclass of [`BaseModel`][pydantic.main.BaseModel]. For example: ```py from typing import List from pydantic import BaseModel, TypeAdapter class Item(BaseModel): id: int name: str # `item_data` could come from an API call, eg., via something like: # item_data = requests.get('https://my-api.com/items').json() item_data = [{'id': 1, 'name': 'My Item'}] items = TypeAdapter(List[Item]).validate_python(item_data) print(items) #> [Item(id=1, name='My Item')] ``` [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] is capable of parsing data into any of the types Pydantic can handle as fields of a [`BaseModel`][pydantic.main.BaseModel]. """ # noqa: D212 from __future__ import annotations as _annotations T = TypeVar('T') if TYPE_CHECKING: # should be `set[int] | set[str] | dict[int, IncEx] | dict[str, IncEx] | None`, but mypy can't cope IncEx = Union[Set[int], Set[str], Dict[int, Any], Dict[str, Any]] def _get_schema(type_: Any, config_wrapper: _config.ConfigWrapper, parent_depth: int) -> CoreSchema: """`BaseModel` uses its own `__module__` to find out where it was defined and then look for symbols to resolve forward references in those globals. On the other hand this function can be called with arbitrary objects, including type aliases where `__module__` (always `typing.py`) is not useful. So instead we look at the globals in our parent stack frame. This works for the case where this function is called in a module that has the target of forward references in its scope, but does not work for more complex cases. 
For example, take the following: a.py ```python from typing import Dict, List IntList = List[int] OuterDict = Dict[str, 'IntList'] ``` b.py ```python test="skip" from a import OuterDict from pydantic import TypeAdapter IntList = int # replaces the symbol the forward reference is looking for v = TypeAdapter(OuterDict) v({'x': 1}) # should fail but doesn't ``` If OuterDict were a `BaseModel`, this would work because it would resolve the forward reference within the `a.py` namespace. But `TypeAdapter(OuterDict)` can't know what module OuterDict came from. In other words, the assumption that _all_ forward references exist in the module we are being called from is not technically always true. Although most of the time it is and it works fine for recursive models and such, `BaseModel`'s behavior isn't perfect either and _can_ break in similar ways, so there is no right or wrong between the two. But at the very least this behavior is _subtly_ different from `BaseModel`'s. """ local_ns = _typing_extra.parent_frame_namespace(parent_depth=parent_depth) global_ns = sys._getframe(max(parent_depth - 1, 1)).f_globals.copy() global_ns.update(local_ns or {}) gen = _generate_schema.GenerateSchema(config_wrapper, types_namespace=global_ns, typevars_map={}) schema = gen.generate_schema(type_) schema = gen.collect_definitions(schema) return schema def _getattr_no_parents(obj: Any, attribute: str) -> Any: """Returns the attribute value without attempting to look up attributes from parent types.""" if hasattr(obj, '__dict__'): try: return obj.__dict__[attribute] except KeyError: pass slots = getattr(obj, '__slots__', None) if slots is not None and attribute in slots: return getattr(obj, attribute) else: raise AttributeError(attribute) class TypeAdapter(Generic[T]): """Type adapters provide a flexible way to perform validation and serialization based on a Python type. A `TypeAdapter` instance exposes some of the functionality from `BaseModel` instance methods for types that do not have such methods (such as dataclasses, primitive types, and more). Note that `TypeAdapter` is not an actual type, so you cannot use it in type annotations. Attributes: core_schema: The core schema for the type. validator (SchemaValidator): The schema validator for the type. serializer: The schema serializer for the type. """ if TYPE_CHECKING: @overload
def __new__(cls, __type: type[T], *, config: ConfigDict | None = ...) -> TypeAdapter[T]:
5
2023-10-23 18:09:28+00:00
24k
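Since this record's target file is pydantic's `TypeAdapter`, a short hedged sketch of the surrounding public API may help orient the reader. It assumes pydantic v2 semantics for `validate_json`, `dump_json`, and `json_schema`; the `Item` model mirrors the example already used in the record's docstring and is illustrative only.

```py
from typing import List

from pydantic import BaseModel, TypeAdapter


class Item(BaseModel):
    id: int
    name: str


adapter = TypeAdapter(List[Item])

# Validate directly from a JSON string, not just from Python objects.
items = adapter.validate_json('[{"id": 1, "name": "My Item"}]')

# Serialize back to JSON bytes and emit a JSON schema for the wrapped type.
print(adapter.dump_json(items))
print(adapter.json_schema())
```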
zju3dv/nr_in_a_room
test/test_optim_pano.py
[ { "identifier": "RoomOptimizer", "path": "optim/room_optimizer.py", "snippet": "class RoomOptimizer:\n def __init__(\n self,\n scale_factor: float,\n bg_scale_factor: float,\n bg_scene_center: list,\n img_wh: list,\n near: float,\n far: float,\n chunk: int,\n model_ckpt_path_dict: Dict[str, Any],\n config=None,\n scale_factor_dict: Dict[str, Any] = {},\n scene_info_path: str = None,\n scene_info_json_path: str = None,\n model_type=\"NeuS\",\n N_samples: int = 64,\n N_importance: int = 128,\n relation_info: Dict[str, Any] = {},\n output_path: str = None,\n prefix: str = \"\",\n active_instance_id: list = [46, 4, 9, 102],\n virtual_instance_id: list = [], # specific for edit (insert virtual to real) mode\n filter_door_and_window: bool = True,\n lr: float = 1e-2,\n N_optim_step: int = 500,\n adjust_lr_per_step: int = 150,\n optim_batch_size: int = 1024,\n use_amp: bool = False,\n extract_obj_bbox_from_neural_model: bool = False,\n ig_data_base_dir: str = \"data/ig_dataset_v1.0.1/\",\n mask_per_object: bool = False,\n bbox_ray_intersect: bool = True,\n bbox_enlarge: float = 0.1,\n optimize_light_env: bool = True,\n optimize_appearance_code: bool = False,\n use_light_from_image_attr: bool = False,\n use_appearance_from_image_attr: bool = False,\n optimize_option: list = [\n \"photometric_loss\",\n \"perceptual_loss\",\n \"z_axis_align_loss\",\n \"object_room_wall_attach\",\n \"object_room_floor_attach\",\n \"physical_violation\",\n \"object_object_attach\",\n ],\n ):\n # load config\n self.scene_info_path = scene_info_path\n self.scale_factor = scale_factor\n self.scale_factor_dict = scale_factor_dict\n self.bg_scale_factor = bg_scale_factor\n self.bg_scene_center = np.array(bg_scene_center)\n self.ig_data_base_dir = ig_data_base_dir\n self.mask_per_object = mask_per_object\n self.bbox_ray_intersect = bbox_ray_intersect\n self.bbox_enlarge = bbox_enlarge\n self.virtual_instance_id = virtual_instance_id\n\n self.img_wh = img_wh\n self.w = img_wh[0]\n self.h = img_wh[1]\n self.near = near\n self.far = far\n self.N_importance = N_importance\n self.N_samples = N_samples\n self.chunk = chunk\n self.lr = lr\n self.N_optim_step = N_optim_step\n self.adjust_lr_per_step = adjust_lr_per_step\n self.optim_batch_size = optim_batch_size\n self.use_amp = use_amp\n self.optimize_light_env = optimize_light_env\n self.optimize_appearance_code = optimize_appearance_code\n self.optimize_option = optimize_option\n self.config = config\n\n self.use_light_from_image_attr = use_light_from_image_attr\n if self.use_light_from_image_attr:\n print(\n \"WARNING: self.use_light_from_image_attr = True, using hard coded light env.\"\n )\n self.hard_coded_light_id = 0 # just for compatibility\n # self.hard_coded_light_id = 9 # probe_03 in 10 HDR multi_light training\n\n self.use_appearance_from_image_attr = use_appearance_from_image_attr\n if self.use_appearance_from_image_attr:\n print(\n \"WARNING: self.use_appearance_from_image_attr = True, using first frame appearance code.\"\n )\n self.hard_coded_appearance_frame_id = 0\n\n self.optimize_exposure = \"optimize_exposure\" in self.optimize_option\n\n # laod scene info\n if scene_info_json_path is None:\n scene_info_json_path = os.path.join(scene_info_path, \"data.json\")\n self.scene_meta = read_json(scene_info_json_path)\n\n self.active_instance_id = active_instance_id\n if filter_door_and_window:\n self.filter_door_and_window()\n\n self.relation_info = relation_info\n\n self.model_type = model_type\n # self.load_model(\n # model_type, model_ckpt_path_dict[\"obj\"], 
model_ckpt_path_dict[\"bg\"]\n # )\n self.load_model_from_dict_path(model_type, model_ckpt_path_dict)\n\n self.reset_optimizable_parameters()\n\n if extract_obj_bbox_from_neural_model:\n self.extract_bounding_boxes_from_neural_model()\n\n if self.bbox_ray_intersect:\n self.prepare_bbox_ray_helper()\n\n self.set_output_path(output_path, prefix)\n\n print(\"RoomOptimizer initialize finished.\")\n\n def load_model_from_dict_path(self, model_type, model_ckpt_path_dict):\n assert model_type == \"NeuS\"\n self.models = {}\n self.image_attrs = {}\n\n # avoid duplicate loading\n self.models_cache = {}\n self.image_attrs_cache = {}\n\n print(\"loading model with instance_id\", self.active_instance_id)\n\n # print(model_ckpt_path_dict)\n for obj_id in self.active_instance_id:\n # identify ckpt_path\n if str(obj_id) in model_ckpt_path_dict:\n ckpt_info = model_ckpt_path_dict[str(obj_id)]\n elif obj_id == 0:\n assert (\n \"bg\" in model_ckpt_path_dict or \"0\" in model_ckpt_path_dict\n ), \"model_ckpt_path_dict missing background 'bg' or '0' ckpt\"\n ckpt_info = model_ckpt_path_dict.get(\"bg\", model_ckpt_path_dict[\"0\"])\n else:\n print(\n f\"Cannot find specific model for obj_id = {obj_id}, \\\n maybe config file is not compatible with given active_instance_id.\"\n )\n ckpt_info = model_ckpt_path_dict[\"obj\"]\n # load with cache\n ckpt_path, neus_conf = ckpt_info[\"path\"], ckpt_info[\"neus_conf\"]\n if ckpt_info not in self.models_cache:\n (\n self.models_cache[ckpt_path],\n self.image_attrs_cache[ckpt_path],\n ) = self.load_model_neus(ckpt_path, obj_id, neus_conf)\n self.models[f\"neus_{obj_id}\"] = self.models_cache[ckpt_path]\n self.image_attrs[str(obj_id)] = self.image_attrs_cache[ckpt_path]\n\n def load_model_nerf(self, ckpt_path):\n # TODO(ybbbbt): fix hard coding\n conf = {\n \"N_max_objs\": 128,\n \"N_obj_embedding\": 64,\n }\n nerf_coarse = NeRF_Object(conf)\n nerf_fine = NeRF_Object(conf)\n image_attributes = ImageAttributes(conf)\n load_ckpt(nerf_coarse, ckpt_path, model_name=\"nerf_coarse\")\n load_ckpt(nerf_fine, ckpt_path, model_name=\"nerf_fine\")\n load_ckpt(image_attributes, ckpt_path, model_name=\"image_attributes\")\n\n nerf_coarse = nerf_coarse.cuda().eval()\n nerf_fine = nerf_fine.cuda().eval()\n image_attributes = image_attributes.cuda().eval()\n\n models = {\n \"coarse\": nerf_coarse,\n \"fine\": nerf_fine,\n }\n\n embedding_xyz = Embedding(3, 10)\n embedding_dir = Embedding(3, 4)\n embeddings = {\n \"xyz\": embedding_xyz,\n \"dir\": embedding_dir,\n }\n return models, embeddings, image_attributes\n\n def load_model_neus(self, ckpt_path, obj_id, config_path=\"config/neus.yaml\"):\n conf = {\n \"model\": {\n \"N_max_objs\": 128,\n \"N_obj_embedding\": 64,\n },\n }\n if self.optimize_light_env:\n # conf[\"model\"].update({\"N_max_lights\": 128, \"N_light_embedding\": 16})\n conf[\"model\"].update({\"N_max_lights\": 1024, \"N_light_embedding\": 16})\n\n if self.optimize_appearance_code and obj_id not in self.virtual_instance_id:\n conf[\"model\"].update(\n {\"N_max_appearance_frames\": 10000, \"N_appearance_embedding\": 16}\n )\n\n neus, render_kwargs_train, render_kwargs_test = get_model_neus(\n config_path=config_path, need_trainer=False, extra_conf=conf\n )\n self.render_kwargs_neus = render_kwargs_test\n image_attributes = ImageAttributes(conf[\"model\"])\n\n print(ckpt_path)\n load_ckpt(neus, ckpt_path, model_name=\"neus\")\n load_ckpt(image_attributes, ckpt_path, model_name=\"image_attributes\")\n\n if self.config is not None and (\n str(obj_id) in 
self.config.get(\"map_virtual_to_local\", {})\n ):\n # image_attributes.embedding_instance\n real_id_in_ckpt = self.config.map_virtual_to_local[str(obj_id)]\n image_attributes.embedding_instance.weight.requires_grad = False\n image_attributes.embedding_instance.weight[\n obj_id\n ] = image_attributes.embedding_instance.weight[real_id_in_ckpt]\n # ipdb.set_trace()\n\n neus.cuda().eval()\n image_attributes.cuda().eval()\n return neus, image_attributes\n\n def reset_optimizable_parameters(self):\n self.params = []\n self.relation_info = {}\n if self.optimize_light_env:\n self.initialize_light_code()\n\n if self.optimize_appearance_code:\n self.initialize_appearance_code()\n\n if self.optimize_exposure:\n self.initialize_autoexposure()\n\n def save_optimizable_parameters(self, path):\n all_param_dict = {}\n # all_param_dict[\"params\"] = self.params\n all_param_dict[\"relation_info\"] = self.relation_info\n all_param_dict[\"object_pose_dict\"] = copy.deepcopy(self.object_pose_dict)\n all_param_dict[\"active_instance_id\"] = copy.deepcopy(self.active_instance_id)\n if self.optimize_light_env:\n all_param_dict[\"light_code\"] = copy.deepcopy(self.light_code_dict)\n if self.optimize_appearance_code:\n all_param_dict[\"appearance_code\"] = copy.deepcopy(self.appearance_code_dict)\n if self.optimize_exposure:\n all_param_dict[\"exposure\"] = copy.deepcopy(self.autoexposure_param)\n torch.save(all_param_dict, path)\n\n def load_optimizable_parameters(self, path):\n all_param_dict = torch.load(path)\n # self.params = all_param_dict[\"params\"]\n self.relation_info = all_param_dict[\"relation_info\"]\n if len(self.virtual_instance_id) == 0: # not overwrite in edit mode\n self.active_instance_id = all_param_dict[\"active_instance_id\"]\n\n def to_gpu(code_dict):\n for k, v in code_dict.items():\n if isinstance(v, torch.Tensor):\n code_dict[k] = v.cuda()\n elif isinstance(v, dict):\n for k2, v2 in v.items():\n if isinstance(v2, torch.Tensor):\n code_dict[k][k2] = v2.cuda()\n\n if len(self.virtual_instance_id) == 0: # not modify edit mode pose\n if hasattr(self, \"object_pose_dict\"):\n self.object_pose_dict.update(all_param_dict[\"object_pose_dict\"])\n else:\n self.object_pose_dict = all_param_dict[\"object_pose_dict\"]\n if self.optimize_light_env:\n self.light_code_dict = all_param_dict[\"light_code\"]\n to_gpu(self.light_code_dict)\n if self.optimize_appearance_code:\n self.appearance_code_dict = all_param_dict[\"appearance_code\"]\n to_gpu(self.appearance_code_dict)\n if self.optimize_exposure and \"exposure\" in all_param_dict:\n self.autoexposure_param = all_param_dict[\"exposure\"]\n to_gpu(self.autoexposure_param)\n # ipdb.set_trace()\n\n def interpolate_light_env_from_states(self, path1, path2, interp):\n all_param_dict_1 = torch.load(path1)\n all_param_dict_2 = torch.load(path2)\n\n # self.params = all_param_dict[\"params\"]\n def to_gpu(code_dict):\n for k, v in code_dict.items():\n if isinstance(v, torch.Tensor):\n code_dict[k] = v.cuda()\n elif isinstance(v, dict):\n for k2, v2 in v.items():\n if isinstance(v2, torch.Tensor):\n code_dict[k][k2] = v2.cuda()\n\n if self.optimize_light_env:\n light_code_dict_1 = all_param_dict_1[\"light_code\"]\n light_code_dict_2 = all_param_dict_2[\"light_code\"]\n for k, v in self.light_code_dict.items():\n self.light_code_dict[k] = light_code_dict_1[\n k\n ] * interp + light_code_dict_2[k] * (1 - interp)\n to_gpu(self.light_code_dict)\n if self.optimize_appearance_code:\n appearance_code_dict_1 = all_param_dict_1[\"appearance_code\"]\n 
appearance_code_dict_2 = all_param_dict_2[\"appearance_code\"]\n for k, v in self.appearance_code_dict.items():\n self.appearance_code_dict[k] = appearance_code_dict_1[\n k\n ] * interp + appearance_code_dict_2[k] * (1 - interp)\n to_gpu(self.appearance_code_dict)\n if self.optimize_exposure:\n autoexposure_param_1 = all_param_dict_1[\"exposure\"]\n autoexposure_param_2 = all_param_dict_2[\"exposure\"]\n for k, v in self.autoexposure_param.items():\n self.autoexposure_param[k] = autoexposure_param_1[\n k\n ] * interp + autoexposure_param_2[k] * (1 - interp)\n to_gpu(self.autoexposure_param)\n\n def reset_active_instance_id(self, active_instance_id, filter_door_and_window=True):\n self.active_instance_id = active_instance_id\n if filter_door_and_window:\n self.filter_door_and_window()\n\n def set_output_path(self, output_path: str, prefix: str, with_timestamp=True):\n if output_path is not None:\n if with_timestamp:\n self.output_path = os.path.join(\n output_path, f\"rendered_{get_timestamp()}_{prefix}\"\n )\n else:\n self.output_path = os.path.join(output_path, f\"{prefix}\")\n os.makedirs(self.output_path, exist_ok=True)\n\n def filter_door_and_window(self):\n print(\"Filtering door and window objects.\")\n filtered_active_instance_id = []\n for obj_id in self.active_instance_id:\n if self.get_type_of_instance(obj_id) not in [\"door\", \"window\"]:\n filtered_active_instance_id += [obj_id]\n self.active_instance_id = filtered_active_instance_id\n\n def initialize_light_code(self):\n self.light_code_dict = {}\n for obj_id in self.active_instance_id:\n # light_code = torch.randn((16)).cuda()\n light_code = torch.zeros((16)).cuda()\n light_code.requires_grad = True\n self.params += [\n {\"params\": light_code, \"lr\": self.lr}\n ] # light code can be optimized with larger lr\n self.light_code_dict[str(obj_id)] = light_code\n\n def initialize_appearance_code(self):\n self.appearance_code_dict = {}\n for obj_id in self.active_instance_id:\n # appearance_code = torch.randn((16)).cuda()\n appearance_code = torch.zeros((16)).cuda()\n appearance_code.requires_grad = True\n self.params += [\n {\"params\": appearance_code, \"lr\": self.lr}\n ] # light code can be optimized with larger lr\n self.appearance_code_dict[str(obj_id)] = appearance_code\n\n def initialize_autoexposure(self):\n self.autoexposure_param = {}\n for obj_id in self.active_instance_id:\n # scale and shift\n autoexposure_param = torch.Tensor([1, 1, 1, 0, 0, 0]).cuda()\n autoexposure_param.requires_grad = True\n self.params += [\n {\"params\": autoexposure_param, \"lr\": self.lr * 0.1}\n ] # light code can be optimized with larger lr\n self.autoexposure_param[str(obj_id)] = autoexposure_param\n\n def get_scale_factor(self, obj_id):\n if obj_id == 0:\n return self.bg_scale_factor\n elif str(obj_id) in self.scale_factor_dict:\n return self.scale_factor_dict[str(obj_id)]\n else:\n return self.scale_factor\n\n def extract_bounding_boxes_from_neural_model(self):\n print(\"Extracting object bounding boxes from neural model...\")\n assert self.model_type == \"NeuS\"\n for obj_id in tqdm(self.active_instance_id):\n mesh = extract_mesh_from_neus(\n self.models[f\"neus_{obj_id}\"],\n self.image_attrs[str(obj_id)],\n obj_id,\n )\n bbox = mesh.get_axis_aligned_bounding_box()\n bound = np.array([bbox.min_bound, bbox.max_bound])\n size = (bound[1] - bound[0]) * self.get_scale_factor(obj_id)\n # update scene_meta\n for idx, obj_info in enumerate(self.scene_meta[\"objs\"]):\n if obj_info[\"id\"] == obj_id:\n 
self.scene_meta[\"objs\"][idx][\"bdb3d\"][\"size\"] = size.tolist()\n\n def prepare_bbox_ray_helper(self):\n # bbox ray helper dict\n self.bbox_ray_helper_dict = {}\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n obj_meta_info = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n length = np.array(obj_meta_info[\"bbox3d\"][\"size\"])\n self.bbox_ray_helper_dict[str(obj_id)] = BBoxRayHelper(np.zeros(3), length)\n\n def generate_object_rays(\n self, rays_o_obj, rays_d_obj, obj_id, near=None, far=None, select_ind=None\n ):\n \"\"\"\n Generate object rays given rays_o, rays_d and obj_id\n Input:\n select_ind: only for masked rendering\n \"\"\"\n if obj_id == 0: # background\n return self.generate_bg_rays(rays_o_obj, rays_d_obj, near=near, far=far)\n if self.bbox_ray_intersect:\n # for object, rays_o and rays_d should lie in world scale (unscaled)\n bbox_mask, bbox_batch_near, bbox_batch_far = self.bbox_ray_helper_dict[\n str(obj_id)\n ].get_ray_bbox_intersections(\n rays_o_obj,\n rays_d_obj,\n self.get_scale_factor(obj_id),\n # bbox_enlarge=self.bbox_enlarge / self.get_scale_factor(obj_id),\n bbox_enlarge=self.bbox_enlarge, # in physical world\n )\n # for area which hits bbox, we use bbox hit near far\n # bbox_ray_helper has scale for us, do no need to rescale\n batch_near_obj, batch_far_obj = bbox_batch_near, bbox_batch_far\n rays_o_obj = rays_o_obj / self.get_scale_factor(obj_id)\n # for the invalid part, we use 0 as near far, which assume that (0, 0, 0) is empty\n batch_near_obj[~bbox_mask] = torch.zeros_like(batch_near_obj[~bbox_mask])\n batch_far_obj[~bbox_mask] = torch.zeros_like(batch_far_obj[~bbox_mask])\n else:\n near = self.near if near is None else near\n far = self.far if far is None else far\n batch_near_obj = (\n near\n / self.get_scale_factor(obj_id)\n * torch.ones_like(rays_o_obj[:, :1])\n )\n batch_far_obj = (\n far / self.get_scale_factor(obj_id) * torch.ones_like(rays_d_obj[:, :1])\n )\n rays_o_obj = rays_o_obj / self.get_scale_factor(obj_id)\n\n if self.mask_per_object:\n # mask out of bound rendering\n obj_mask = torch.from_numpy(self.instance_mask == obj_id).view(-1)\n obj_mask = obj_mask[select_ind]\n batch_near_obj[~obj_mask] = 0\n batch_far_obj[~obj_mask] = 0\n\n rays_obj = torch.cat(\n [rays_o_obj, rays_d_obj, batch_near_obj, batch_far_obj], 1\n ) # (H*W, 8)\n rays_obj = rays_obj.cuda()\n return rays_obj\n\n def generate_bg_rays(self, rays_o_bg, rays_d_bg, near=None, far=None):\n near = self.near if near is None else near\n far = self.far if far is None else far\n batch_near_bg = near / self.bg_scale_factor * torch.ones_like(rays_o_bg[:, :1])\n batch_far_bg = far / self.bg_scale_factor * torch.ones_like(rays_d_bg[:, :1])\n rays_o_bg = rays_o_bg / self.bg_scale_factor\n rays_bg = torch.cat(\n [rays_o_bg, rays_d_bg, batch_near_bg, batch_far_bg], 1\n ) # (H*W, 8)\n rays_bg = rays_bg.cuda()\n return rays_bg\n\n def batched_inference_multi(\n self,\n rays_list,\n obj_id_list,\n to_cpu=True,\n hit_test_only=False,\n need_normal=False,\n use_sphere_tracing=True,\n safe_region_volume_rendering=True,\n refine_edge=False,\n refine_edge_obj_ids=[],\n render_mask=False,\n # use_sphere_tracing=False,\n show_progress=False,\n **kwargs,\n ):\n \"\"\"Do batched inference on rays using chunk.\"\"\"\n B = rays_list[0].shape[0]\n results = defaultdict(list)\n for i in tqdm(range(0, B, self.chunk), disable=not show_progress):\n extra_chunk = dict()\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor) and \"autoexposure_\" not in 
k:\n extra_chunk[k] = v[i : i + self.chunk]\n else:\n extra_chunk[k] = v\n if self.model_type == \"NeRF\":\n rendered_ray_chunks = render_rays_multi(\n self.models,\n self.embeddings,\n [r[i : i + self.chunk] for r in rays_list],\n obj_id_list,\n self.N_samples,\n use_disp=False,\n perturb=0.001,\n # perturb=0.00,\n noise_std=0,\n N_importance=self.N_importance,\n chunk=self.chunk,\n white_back=True,\n individual_weight_for_coarse=True,\n obj_bg_relative_scale=self.bg_scale_factor / self.scale_factor,\n **extra_chunk,\n )\n elif self.model_type == \"NeuS\":\n rendered_ray_chunks = render_rays_multi_neus(\n self,\n self.models,\n [r[i : i + self.chunk] for r in rays_list],\n obj_id_list,\n noise_std=0,\n white_back=True,\n # white_back=False,\n # obj_bg_relative_scale=self.bg_scale_factor / self.scale_factor,\n hit_test_only=hit_test_only,\n need_normal=need_normal,\n use_sphere_tracing=use_sphere_tracing,\n safe_region_volume_rendering=safe_region_volume_rendering,\n refine_edge=refine_edge,\n refine_edge_obj_ids=refine_edge_obj_ids,\n render_mask=render_mask,\n extra_dict=extra_chunk,\n render_kwargs=self.render_kwargs_neus,\n )\n\n for k, v in rendered_ray_chunks.items():\n if to_cpu:\n results[k] += [v.cpu()]\n else:\n results[k] += [v]\n\n for k, v in results.items():\n results[k] = torch.cat(v, 0)\n return results\n\n def render_full_scene(\n self,\n pose: np.ndarray,\n idx: int,\n h: int,\n w: int,\n write_idx_on_image=True,\n return_raw_image=False,\n render_mask=False,\n refine_edge=False,\n use_sphere_tracing=True,\n safe_region_volume_rendering=False,\n show_progress=False,\n refine_edge_obj_ids=[],\n fovx_deg=0,\n ):\n extra_dict = dict()\n extra_dict[\"compute_3d_mask\"] = False\n extra_dict[\"is_eval\"] = True\n\n rays_list = []\n object_id_list = []\n\n if fovx_deg > 0:\n focal = (w / 2) / np.tan((fovx_deg / 2) / (180 / np.pi))\n print(\"focal =\", focal)\n directions = get_ray_directions(h, w, focal).cuda() # (h, w, 3)\n else:\n directions = get_ray_directions_equirectangular(h, w).cuda() # (h, w, 3)\n\n for obj_id in self.active_instance_id:\n # get object location\n # Two: object to world pose\n if obj_id == 0: # 0 denotes background\n Two = np.eye(4)\n Two[:3, 3] = self.bg_scene_center\n else: # other objects\n Two = torch.eye(4).cuda()\n Two[:3, :3] = rotation_6d_to_matrix(\n self.object_pose_dict[str(obj_id)][\"rot6d\"]\n )\n Two[:3, 3] = self.object_pose_dict[str(obj_id)][\"trans\"]\n Two = Two.detach().cpu().numpy()\n # pose: Twc\n # we need: Toc\n Twc = np.eye(4)\n Twc[:3, :4] = pose[:3, :4]\n\n Toc = np.linalg.inv(Two) @ Twc\n\n Toc = torch.from_numpy(Toc).float().cuda()[:3, :4]\n rays_o, rays_d = get_rays(directions, Toc)\n\n rays = self.generate_object_rays(rays_o, rays_d, obj_id)\n\n rays_list += [rays]\n object_id_list += [obj_id]\n\n # set image_attr for object code\n extra_dict[\"embedding_inst_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_instance(torch.ones_like(rays_o[..., 0]).long().cuda() * obj_id)\n # light code\n if self.optimize_light_env:\n if self.use_light_from_image_attr or obj_id in self.virtual_instance_id:\n if not hasattr(self, \"hard_code_light_id\"):\n self.hard_coded_light_id = 0\n extra_dict[\"embedding_light_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_light(\n torch.ones_like(rays_o[..., 0]).long().cuda()\n * self.hard_coded_light_id\n )\n else:\n extra_dict[\"embedding_light_{}\".format(obj_id)] = (\n self.light_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # 
appearance code\n if self.optimize_appearance_code and obj_id not in self.virtual_instance_id:\n if self.use_appearance_from_image_attr:\n extra_dict[\n \"embedding_appearance_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_appearance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * 0\n )\n else:\n extra_dict[\"embedding_appearance_{}\".format(obj_id)] = (\n self.appearance_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n\n # optimize exposure\n if self.optimize_exposure and obj_id not in self.virtual_instance_id:\n extra_dict[f\"autoexposure_{obj_id}\"] = self.autoexposure_param[\n str(obj_id)\n ]\n\n with torch.cuda.amp.autocast(enabled=True):\n with torch.no_grad():\n results = self.batched_inference_multi(\n rays_list,\n object_id_list,\n to_cpu=False,\n use_sphere_tracing=use_sphere_tracing,\n # use_sphere_tracing=True,\n safe_region_volume_rendering=safe_region_volume_rendering,\n refine_edge=refine_edge,\n render_mask=render_mask,\n show_progress=show_progress,\n **extra_dict,\n )\n img = results[f\"rgb_fine\"]\n img_pred = np.clip(img.view(h, w, 3).cpu().numpy(), 0, 1)\n img_pred_ = (img_pred * 255).astype(np.uint8)\n\n if return_raw_image:\n if render_mask:\n img_mask = results[f\"rendered_instance_mask\"]\n img_mask = (\n img_mask.view(h, w, 3)[:, :, 0]\n .cpu()\n .numpy()\n .round()\n .astype(np.uint16)\n )\n return img_pred_, img_mask\n return img_pred_ # raw image in [h, w, 3] np.uint8\n\n if write_idx_on_image:\n img_pred_ = cv2.putText(\n img_pred_,\n \"Iter: {:03d}\".format(idx),\n (20, 20),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.7,\n (255, 0, 0),\n 2,\n )\n\n imageio.imwrite(\n os.path.join(self.output_path, f\"{idx:06d}.multi_obj.png\"), img_pred_\n )\n if render_mask:\n img_mask = results[f\"rendered_instance_mask\"]\n img_mask = (\n img_mask.view(h, w, 3)[:, :, 0].cpu().numpy().round().astype(np.uint16)\n )\n cv2.imwrite(os.path.join(self.output_path, f\"{idx:06d}.seg.png\"), img_mask)\n\n def set_initial_object_poses_from_scene_meta(self, add_noise=True):\n self.object_pose_dict = {}\n\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n obj_meta_info = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n if \"gt_T_wo\" in obj_meta_info:\n Two = obj_meta_info[\"gt_T_wo\"]\n else:\n print(\n f\"Cannot find object pose for obj_id = {obj_id}, use custom pose with minor offset.\"\n )\n Two = np.eye(4)\n from scipy.spatial.transform import Rotation as R\n\n rot_fix = np.array([1, 0, 0, 0, 0, 1, 0, -1, 0]).reshape(3, 3)\n # TODO: update initial pose for real-world scenes\n # if obj_id == 31:\n # blender_xyz = np.array([-1.44, 1.18, 0.1])\n # blender_rot = R.from_quat([0.5, -0.5, 0.5, 0.5]).as_matrix()\n # elif obj_id == 32:\n # blender_xyz = np.array([0.76, 0.54, 0.98])\n # blender_rot = R.from_quat([0.707107, 0, 0, 0.707107]).as_matrix()\n # elif obj_id == 33:\n # blender_xyz = np.array([-0.06, 1.01, -0.9])\n # blender_rot = R.from_quat([0, 0.707107, -0.707107, 0]).as_matrix()\n # elif obj_id == 34:\n # blender_xyz = np.array([-0.05, 1.14, -0.15])\n # blender_rot = R.from_quat([0, 0.707107, -0.707107, 0]).as_matrix()\n # elif obj_id == 35:\n # blender_xyz = np.array([-0.35, 1.1, 0.98])\n # blender_rot = R.from_quat([0.707107, 0, 0, 0.707107]).as_matrix()\n\n # Two[:3, :3] = blender_rot @ rot_fix\n # Two[:3, :3] = rot_fix @ blender_rot\n # Two[:3, 3] = rot_fix @ blender_xyz\n\n # Two[1, 3] += 0.75\n # Two[2, 3] -= 0.7\n\n # add noise\n if add_noise:\n Two[:3, 3] += 0.1\n from 
scipy.spatial.transform import Rotation as R\n\n rot_noise = R.from_euler(\"z\", 20, degrees=True).as_matrix()\n Two[:3, :3] = Two[:3, :3] @ rot_noise\n Two = torch.from_numpy(Two).float().cuda()\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n if \"fix_object_pose\" not in self.optimize_option:\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_initial_pose_from_prediction(self, pred_json_path):\n print(\"Initial pose from\", pred_json_path)\n self.object_pose_dict = {}\n self.initial_pose_prediction = {}\n pred_info = read_json(pred_json_path)\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n Two = np.array(pred_info[str(obj_id)][\"Two\"])\n Two = torch.from_numpy(Two).float().cuda()\n self.initial_pose_prediction[str(obj_id)] = {\"Two\": Two.clone()}\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n\n if not \"fix_object_pose\" in self.optimize_option:\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_initial_pose_as_identity(self):\n print(\"Initial pose as identity.\")\n self.object_pose_dict = {}\n self.initial_pose_prediction = {}\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n Two = np.eye(4)\n Two = torch.from_numpy(Two).float().cuda()\n self.initial_pose_prediction[str(obj_id)] = {\"Two\": Two.clone()}\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_sampling_mask_from_seg(\n self,\n seg_mask=None,\n seg_mask_path=None,\n add_noise_to_seg=0,\n convert_seg_mask_to_box_mask=False,\n ):\n if seg_mask_path is not None:\n print(\"Read segmentation from gt mask\")\n # read mask\n self.instance_mask = get_instance_mask(seg_mask_path, img_wh=self.img_wh)\n elif seg_mask is not None:\n self.instance_mask = seg_mask\n else:\n print(\"Warning: empty mask\")\n self.merged_mask = (\n np.ones((self.img_wh[1], self.img_wh[0])).reshape(-1).astype(bool)\n )\n return\n\n # merge active object masks\n merged_mask = np.zeros_like(self.instance_mask)\n for i_obj, obj_id in enumerate(self.active_instance_id):\n if obj_id == 0:\n continue # do not accumulate background obj_id\n instance_mask_obj = self.instance_mask == obj_id\n # use tightly fit bbox instead of segmentation mask\n if convert_seg_mask_to_box_mask:\n instance_mask_obj = seg_mask_to_box_mask(instance_mask_obj)\n merged_mask = np.logical_or(merged_mask, instance_mask_obj)\n\n # if add noise to gt segmentation\n if add_noise_to_seg != 0:\n is_dilate = add_noise_to_seg > 0\n add_noise_to_seg = abs(add_noise_to_seg)\n kernel = np.ones((add_noise_to_seg, add_noise_to_seg), np.uint8)\n if is_dilate:\n merged_mask = cv2.dilate(\n merged_mask.astype(np.uint8), kernel, iterations=1\n ).astype(bool)\n else:\n merged_mask = cv2.erode(\n merged_mask.astype(np.uint8), kernel, iterations=1\n 
).astype(bool)\n cv2.imwrite(\n f\"{self.output_path}/merged_mask.png\", merged_mask.astype(np.uint8) * 255\n )\n self.merged_mask = merged_mask.reshape(-1)\n\n def get_type_of_instance(self, instance_id):\n for obj_info in self.scene_meta[\"objs\"]:\n if obj_info[\"id\"] == instance_id:\n return obj_info[\"classname\"]\n return \"unknown\"\n\n def generate_relation(\n self,\n obj_to_room_distance_th: float = 0.5,\n top_down_dist_th: float = 0.3,\n top_down_xy_close_factor: float = 0.8,\n ):\n \"\"\"\n Generate relationship : object-wall, object-floor, object-object\n \"\"\"\n print(\"Start to generate relation from initial poses and neural models...\")\n all_obj_info = {}\n for i, obj_id in enumerate(self.active_instance_id):\n if obj_id == 0:\n continue\n Rwo = rotation_6d_to_matrix(self.object_pose_dict[str(obj_id)][\"rot6d\"])\n two = self.object_pose_dict[str(obj_id)][\"trans\"]\n optimized_meta = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n optimized_meta.pop(\"gt_T_wo\", None) # pop gt\n # pass optimized object pose\n optimized_meta[\"Rwo\"] = Rwo\n optimized_meta[\"two\"] = two\n optimized_meta[\"obj_id\"] = obj_id\n all_obj_info[str(obj_id)] = optimized_meta\n with torch.no_grad():\n generate_relation_for_all(\n room_optimizer=self,\n all_obj_info=all_obj_info,\n obj_to_room_distance_th=obj_to_room_distance_th,\n top_down_dist_th=top_down_dist_th,\n top_down_xy_close_factor=top_down_xy_close_factor,\n )\n # print(\"Relation:\\n\", self.relation_info)\n for k, v in self.relation_info.items():\n print(k, v)\n\n def optimize(self, input_rgb: torch.Tensor, pose=None):\n \"\"\"\n Inputs:\n input_rgb: torch.Tensor [h, w, 3] normalized in 0...1\n \"\"\"\n if pose is None:\n pose = np.array(self.scene_meta[\"camera\"][\"cam3d2world\"]).reshape(4, 4)\n # Original poses has rotation in form \"right down forward\", change to NDC \"right up back\"\n fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)\n pose[:3, :3] = pose[:3, :3] @ fix_rot\n\n # camera to world pose\n Twc = np.eye(4)\n Twc[:3, :4] = pose[:3, :4]\n Twc = torch.from_numpy(Twc).float().cuda()\n\n if \"keypoint_mask\" in self.optimize_option:\n # detect keypoint for interest region\n keypoint_mask = detect_keypoints(input_rgb.numpy(), circle_radius=5)\n self.merged_mask = np.logical_and(\n keypoint_mask, self.merged_mask.reshape(keypoint_mask.shape)\n )\n cv2.imwrite(\n f\"{self.output_path}/merged_mask_keypoint.png\",\n self.merged_mask.astype(np.uint8) * 255,\n )\n self.merged_mask = self.merged_mask.reshape(-1)\n\n input_rgb = input_rgb.view(-1, 3) # (H*W, 3) RGB\n\n directions = get_ray_directions_equirectangular(\n self.h, self.w\n ).cuda() # (h, w, 3)\n\n mse_loss = nn.MSELoss(reduction=\"none\")\n\n assert hasattr(\n self, \"params\"\n ), \"Please set initial pose params before optimization.\"\n optimizer = torch.optim.Adam(self.params)\n\n scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp)\n perceptual_net = perceptual_model.VGG16_for_Perceptual().cuda()\n\n sample_prob = pano_sample_probability(self.h, self.w).reshape(-1)\n\n t = trange(self.N_optim_step, desc=\"Opt.\", leave=True)\n for i_step in t:\n if \"regenerate_relation_during_test\" in self.optimize_option:\n if i_step != 0 and i_step % 50 == 0:\n self.generate_relation()\n if self.adjust_lr_per_step > 0:\n adjust_learning_rate(\n self.lr,\n optimizer,\n i_step,\n base=0.5,\n adjust_lr_every=self.adjust_lr_per_step,\n )\n extra_dict = dict()\n rays_list = []\n object_id_list = []\n # sample according to batch size 
limitation\n select_ind = np.arange(self.merged_mask.shape[0])[self.merged_mask]\n if (\n \"perceptual_loss\" not in self.optimize_option\n ): # we only sample some points in this case\n # sample according to pano distribution\n select_sample_prob = sample_prob[self.merged_mask]\n select_sample_prob /= select_sample_prob.sum()\n # assert select_ind.shape[0] > self.optim_batch_size\n sample_size = min(select_ind.shape[0], self.optim_batch_size)\n select_ind = np.random.choice(\n select_ind,\n size=sample_size,\n replace=False,\n p=select_sample_prob,\n )\n\n # add some sampling on the background for bg light code\n if self.optimize_light_env:\n bg_sample_ratio = 0.2\n bg_sample_prob = sample_prob[~self.merged_mask]\n bg_sample_prob /= bg_sample_prob.sum()\n bg_sample_ind = np.arange(self.merged_mask.shape[0])[~self.merged_mask]\n # assert bg_sample_ind.shape[0] > self.optim_batch_size\n bg_sample_size = min(\n bg_sample_ind.shape[0], int(bg_sample_ratio * self.optim_batch_size)\n )\n if bg_sample_size > 0:\n bg_sample_ind = np.random.choice(\n bg_sample_ind,\n size=bg_sample_size,\n replace=False,\n p=bg_sample_prob,\n )\n select_ind = np.concatenate([select_ind, bg_sample_ind], axis=-1)\n\n select_ind = np.unique(select_ind)\n if i_step == 0:\n print(\"Actual optimization rays\", select_ind.shape[0])\n select_input_rgb = input_rgb[select_ind].float().cuda()\n\n loss_dict = {}\n all_obj_info = {} # prepare for violation loss\n\n for i, obj_id in enumerate(self.active_instance_id):\n # object to world pose\n if obj_id == 0:\n Rwo = torch.eye(3).cuda()\n two = torch.from_numpy(self.bg_scene_center).float().cuda()\n else:\n Rwo = rotation_6d_to_matrix(\n self.object_pose_dict[str(obj_id)][\"rot6d\"]\n )\n two = self.object_pose_dict[str(obj_id)][\"trans\"]\n\n # camera to object pose\n Toc = torch.eye(4).cuda()\n Toc[:3, :3] = Rwo.T @ Twc[:3, :3]\n Toc[:3, 3] = Rwo.T @ (Twc[:3, 3] - two)\n\n # generate object rays\n rays_o, rays_d = get_rays(directions, Toc[:3, :4])\n\n rays_o = rays_o[select_ind]\n rays_d = rays_d[select_ind]\n\n rays = self.generate_object_rays(rays_o, rays_d, obj_id)\n rays_list += [rays]\n object_id_list += [obj_id]\n\n # set image_attr for object code\n extra_dict[\"embedding_inst_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_instance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * obj_id\n )\n # light code\n if self.optimize_light_env:\n if self.use_light_from_image_attr:\n extra_dict[\n \"embedding_light_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_light(\n torch.ones_like(rays_o[..., 0]).long().cuda()\n * self.hard_coded_light_id\n )\n else:\n extra_dict[\"embedding_light_{}\".format(obj_id)] = (\n self.light_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # appearance code\n if self.optimize_appearance_code:\n if self.use_appearance_from_image_attr:\n extra_dict[\n \"embedding_appearance_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_appearance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * 0\n )\n else:\n extra_dict[\"embedding_appearance_{}\".format(obj_id)] = (\n self.appearance_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # autoexposure\n if self.optimize_exposure:\n extra_dict[f\"autoexposure_{obj_id}\"] = self.autoexposure_param[\n str(obj_id)\n ]\n\n # we do not need to add relation constraints to bg\n if obj_id == 0:\n continue\n\n # enforce optimising on yaw\n if \"z_axis_align_loss\" in self.optimize_option:\n 
loss_dict[\"z_axis_loss_{}\".format(obj_id)] = (\n z_axis_loss(Rwo, 1.0) * 1e2\n )\n\n optimized_meta = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n optimized_meta.pop(\"gt_T_wo\", None) # pop gt\n # pass optimized object pose\n optimized_meta[\"Rwo\"] = Rwo\n optimized_meta[\"two\"] = two\n optimized_meta[\"obj_id\"] = obj_id\n obj_id_key = str(obj_id)\n\n if obj_id_key not in self.relation_info:\n continue\n\n # get obj_relation from input\n obj_relation = self.relation_info[obj_id_key]\n # supplement obj_type\n obj_type = self.get_type_of_instance(obj_id)\n optimized_meta[\"obj_type\"] = obj_type\n\n all_obj_info[str(obj_id)] = optimized_meta\n\n with torch.cuda.amp.autocast(enabled=self.use_amp):\n \"\"\"attach wall loss\"\"\"\n if (\n \"object_room_wall_attach\" in self.optimize_option\n and obj_relation.get(\"attach_wall\", False)\n ):\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info\": optimized_meta,\n # \"face_direction\": torch.Tensor([0, 1, 0]),\n # \"face_direction\": obj_relation.get(\n # \"attach_wall_face_dir\", torch.Tensor([0, 1, 0])\n # ),\n \"face_direction\": obj_relation[\"attach_wall_face_dir\"],\n \"ray_grid_size\": 10,\n }\n # for door object, we slightly stretch the size to ensure successive hit-test\n if obj_type == \"door\" or obj_type == \"window\":\n kwargs.update(\n {\n \"ray_grid_stretch\": torch.Tensor([1.2, 1.2, 1]),\n \"use_bbox_surface_as_in_detect\": True,\n }\n )\n loss_dict.update(object_room_magnetic_loss(**kwargs))\n\n \"\"\"attach floor loss\"\"\"\n if (\n \"object_room_floor_attach\" in self.optimize_option\n and obj_relation.get(\"attach_floor\", False)\n ):\n # # TODO(ybbbbt): hard code floor\n # loss_dict.update(\n # obj_attach_floor_loss(optimized_meta, floor=0.0)\n # )\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info\": optimized_meta,\n \"face_direction\": torch.Tensor([0, 0, -1]),\n \"ray_grid_stretch\": torch.Tensor(\n [0.8, 0.8, 1.0]\n ), # avoid too close to wall\n \"use_bbox_surface_as_in_detect\": True,\n \"ray_grid_size\": 3,\n }\n if obj_type == \"door\":\n # kwargs[\"ray_grid_offset\"] = torch.Tensor(\n # [0, -0.3, 0]\n # ) # to avoid to close to wall\n assert (\n \"attach_wall_face_dir\" in obj_relation\n ), f\"door {obj_id} relation prediction failed.\"\n kwargs[\"ray_grid_offset\"] = (\n obj_relation[\"attach_wall_face_dir\"] * -0.3\n ) # to avoid to close to wall\n loss_dict.update(object_room_magnetic_loss(**kwargs))\n\n with torch.cuda.amp.autocast(enabled=self.use_amp):\n results = self.batched_inference_multi(\n rays_list,\n object_id_list,\n to_cpu=False,\n # use_sphere_tracing=True,\n use_sphere_tracing=False,\n **extra_dict,\n )\n pred_rgb = results[\"rgb_fine\"]\n\n if \"photometric_loss\" in self.optimize_option:\n loss_dict[\"mse_loss\"] = mse_loss(pred_rgb, select_input_rgb).mean()\n\n if \"visualize_pred\" in self.optimize_option: # dump image for debug\n # pred_rgb_full = input_rgb.cuda()\n pred_rgb_full = torch.zeros_like(input_rgb.cuda())\n pred_rgb_full[select_ind] = pred_rgb\n\n imageio.imwrite(\n f\"debug/pred_rgb_full.png\",\n (pred_rgb_full * 255)\n .view(self.img_wh[1], self.img_wh[0], 3)\n .detach()\n .cpu()\n .numpy()\n .astype(np.uint8),\n )\n\n if \"perceptual_loss\" in self.optimize_option:\n pred_rgb_full = input_rgb.cuda()\n pred_rgb_full[select_ind] = pred_rgb\n loss_dict.update(\n patch_perceptual_loss(\n perceptual_net,\n pred_rgb_full,\n input_rgb,\n all_obj_info,\n self.instance_mask,\n self.img_wh,\n )\n )\n\n \"\"\"attach bottom to other object 
loss\"\"\"\n if \"object_object_attach\" in self.optimize_option:\n for obj_id_str, obj_relation in self.relation_info.items():\n if obj_relation.get(\"attach_bottom_to_object\", False):\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info_src\": all_obj_info[obj_id_str],\n \"obj_info_tgt\": all_obj_info[\n str(obj_relation[\"attach_tgt_obj_id\"])\n ],\n \"face_direction\": torch.Tensor([0, 0, -1]),\n }\n loss_dict.update(object_object_attach_loss(**kwargs))\n\n # physical violation loss\n if \"physical_violation\" in self.optimize_option:\n if (\n not \"physical_violation_delayed_start\" in self.optimize_option\n or i_step >= 100\n ):\n loss_dict.update(\n physical_violation_loss(\n self,\n all_obj_info,\n N_nearest_obj=3,\n check_background_violation=True,\n # N_sample_points=1000,\n N_sample_points=2000,\n # N_sample_points=300,\n )\n )\n\n if \"viewing_constraint\" in self.optimize_option:\n loss_dict.update(viewing_constraint_loss(self, Twc, all_obj_info))\n\n if \"print_loss_dict\" in self.optimize_option:\n for k, v in loss_dict.items():\n # if \"_62\" not in k:\n # continue\n print(k, \"=\", float(v))\n loss = sum(list(loss_dict.values()))\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n optimizer.zero_grad()\n\n t.set_description(\"Loss: %f\" % float(loss))\n t.refresh()\n # dump image\n if i_step % 20 == 0:\n self.save_optimizable_parameters(\n f\"{self.output_path}/{i_step:06d}.state.ckpt\"\n )\n # self.load_optimizable_parameters(\n # f\"{self.output_path}/{i_step:06d}.state.ckpt\"\n # )\n if i_step >= self.N_optim_step - 20:\n self.render_full_scene(\n pose=pose,\n idx=i_step,\n write_idx_on_image=False,\n render_mask=True,\n h=512,\n w=1280,\n )\n else:\n self.render_full_scene(\n pose=pose,\n idx=i_step,\n render_mask=False,\n h=self.h,\n w=self.w,\n )\n dump_optimization_meta_to_file(\n filepath=f\"{self.output_path}/{i_step:06d}.optim.json\",\n obj_pose_dict=self.object_pose_dict,\n )" }, { "identifier": "read_real_scene_localization", "path": "optim/misc_utils.py", "snippet": "def read_real_scene_localization(pose_path: str, transform_info_json_path: str):\n pose_dict = {}\n transform_info = read_json(transform_info_json_path)\n trans_colmap_to_arkit = np.array(transform_info[\"transform_colmap_to_arkit_sRT\"])\n trans_align = np.array(transform_info[\"transform_alignment\"])\n with open(pose_path) as file:\n lines = file.readlines()\n lines = lines[1:]\n for line in lines:\n fname, tx, ty, tz, qx, qy, qz, qw, _, _ = line.strip().split(\" \")\n fname += \".png\"\n pose = np.eye(4)\n pose[0, 3] = tx\n pose[1, 3] = ty\n pose[2, 3] = tz\n # Twc\n pose[:3, :3] = Rotation.from_quat([qx, qy, qz, qw]).as_matrix()\n # pose = np.linalg.inv(pose)\n # pose_ndc = np.linalg.inv(pose_ndc)\n\n # convert to ndc\n # pose_ndc = pose\n # fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)\n # pose_ndc[:3, :3] = pose_ndc[:3, :3] @ fix_rot\n\n # transform to arkit pose\n s, R, t = decompose_to_sRT(trans_colmap_to_arkit)\n # pose_ndc = transform_colmap_to_arkit @ pose_ndc\n # print(s, R, t)\n pose[:3, 3] = R @ (pose[:3, 3] * s) + t\n pose[:3, :3] = R @ pose[:3, :3]\n\n # apply alignment to poses\n pose = trans_align @ pose\n\n pose_dict[fname] = {\"pose_slam_Twc\": pose}\n # print(fname, pose)\n return pose_dict" }, { "identifier": "read_real_scene_localization_with_name", "path": "optim/misc_utils.py", "snippet": "def read_real_scene_localization_with_name(arrangement_name):\n localization_info = read_real_scene_localization(\n 
f\"data/real_room_0/arrangement_panorama_select/{arrangement_name}/traj.txt\",\n \"data/real_room_0/objects/000/background_hloc_neus_normal_converge/transform_info.json\",\n )\n return localization_info" }, { "identifier": "read_testing_config", "path": "optim/misc_utils.py", "snippet": "def read_testing_config():\n conf_cli = OmegaConf.from_cli()\n conf_test_file = OmegaConf.load(conf_cli.config)\n # read dataset config\n conf_test_file[\"dataset_config\"] = read_dataset_config_file(\n conf_test_file[\"dataset_config_path\"]\n )\n conf_test_file[\"bg_dataset_config\"] = read_dataset_config_file(\n conf_test_file[\"bg_dataset_config_path\"]\n )\n\n # processing ckpt\n ckpt_path_dict = {}\n for item in conf_test_file[\"ckpt_lists\"]:\n path = item[\"path\"]\n obj_ids = item[\"obj_ids\"]\n neus_conf = item.get(\"neus_conf\", \"config/neus.yaml\")\n for obj_id in obj_ids:\n ckpt_path_dict[str(obj_id)] = {\"path\": path, \"neus_conf\": neus_conf}\n conf_test_file[\"ckpt_path_dict\"] = ckpt_path_dict\n\n conf_merged = OmegaConf.merge(conf_test_file, conf_cli)\n return conf_merged" } ]
import sys
import os
import torch
import numpy as np
from PIL import Image
from omegaconf import OmegaConf
from optim.room_optimizer import RoomOptimizer
from optim.misc_utils import (
    read_real_scene_localization,
    read_real_scene_localization_with_name,
    read_testing_config,
)
15,006
active_instance_id = config.active_instance_id image_path = config.test_image_path dataset_config = config.dataset_config["dataset"] bg_scale_factor = 1 bg_scene_center = [0, 0, 0] if config.bg_dataset_config != "": bg_dataset_config = config.bg_dataset_config["dataset"] bg_scale_factor = bg_dataset_config["scale_factor"] bg_scene_center = bg_dataset_config["scene_center"] img_wh = config.img_wh # read image input_rgb = Image.open(image_path) input_rgb = input_rgb.resize(img_wh, Image.LANCZOS) input_rgb = np.array(input_rgb) input_rgb = torch.from_numpy(input_rgb).float() / 255 # (H, W, 3) # intialize room optimizer room_optimizer = RoomOptimizer( scene_info_json_path=config.scene_info_json, scale_factor=dataset_config["scale_factor"], scale_factor_dict=dataset_config.get("scale_factor_dict", {}), bg_scale_factor=bg_scale_factor, bg_scene_center=bg_scene_center, img_wh=config.img_wh, near=0.3, far=10.0, N_samples=64, N_importance=128, chunk=config.chunk, model_ckpt_path_dict=config.ckpt_path_dict, # relation_info=relation_info, relation_info={}, output_path="debug", prefix=config.prefix, active_instance_id=active_instance_id, lr=1e-2, # lr=5e-2, N_optim_step=500, adjust_lr_per_step=0, optim_batch_size=1024, # optim_batch_size=2048, # optim_batch_size=4096, # use_amp=False, use_amp=True, optimize_light_env=True, optimize_appearance_code=config.get("optimize_appearance_code", False), mask_per_object=False, bbox_ray_intersect=True, bbox_enlarge=0.1, optimize_option=[ "keypoint_mask", "photometric_loss", # "perceptual_loss", "z_axis_align_loss", "object_room_wall_attach", "object_room_floor_attach", "physical_violation", # "physical_violation_delayed_start", "object_object_attach", "viewing_constraint", "optimize_exposure", "regenerate_relation_during_test", # "visualize_pred", # "print_loss_dict", ], ) # room_optimizer.set_sampling_mask_from_seg( # seg_mask=None, # seg_mask_path=config.seg_mask_path, # # add_noise_to_seg=0, # add_noise_to_seg=5, # dilate mask # convert_seg_mask_to_box_mask=True, # # convert_seg_mask_to_box_mask=False, # ) room_optimizer.set_sampling_mask_from_seg( # seg_mask=torch.ones_like(input_rgb[:, :, 0]).numpy() * 31, seg_mask_path=config.seg_mask_path, # add_noise_to_seg=0, add_noise_to_seg=5, # dilate mask convert_seg_mask_to_box_mask=False, # convert_seg_mask_to_box_mask=False, ) if "obj_prediction_json" in config: room_optimizer.set_initial_pose_from_prediction(config["obj_prediction_json"]) else: room_optimizer.set_initial_object_poses_from_scene_meta() # dump config config["optimize_option"] = room_optimizer.optimize_option OmegaConf.save( config=config, f=os.path.join(room_optimizer.output_path, "optim_config_full.yaml"), ) room_optimizer.generate_relation() real_room_loc = read_real_scene_localization_with_name("arrangement3") # get file stem file_stem = os.path.splitext(os.path.basename(image_path))[0] pose = real_room_loc[file_stem]["pose_slam_Twc"] pose = np.array(pose) room_optimizer.optimize( input_rgb, pose=pose, ) if __name__ == "__main__": """ Usage: python test/test_optim_pano.py config=test/config/ig_bedroom.yml "img_wh=[320,180]" prefix=dbg_bedroom_conf """
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def main(config): active_instance_id = config.active_instance_id image_path = config.test_image_path dataset_config = config.dataset_config["dataset"] bg_scale_factor = 1 bg_scene_center = [0, 0, 0] if config.bg_dataset_config != "": bg_dataset_config = config.bg_dataset_config["dataset"] bg_scale_factor = bg_dataset_config["scale_factor"] bg_scene_center = bg_dataset_config["scene_center"] img_wh = config.img_wh # read image input_rgb = Image.open(image_path) input_rgb = input_rgb.resize(img_wh, Image.LANCZOS) input_rgb = np.array(input_rgb) input_rgb = torch.from_numpy(input_rgb).float() / 255 # (H, W, 3) # intialize room optimizer room_optimizer = RoomOptimizer( scene_info_json_path=config.scene_info_json, scale_factor=dataset_config["scale_factor"], scale_factor_dict=dataset_config.get("scale_factor_dict", {}), bg_scale_factor=bg_scale_factor, bg_scene_center=bg_scene_center, img_wh=config.img_wh, near=0.3, far=10.0, N_samples=64, N_importance=128, chunk=config.chunk, model_ckpt_path_dict=config.ckpt_path_dict, # relation_info=relation_info, relation_info={}, output_path="debug", prefix=config.prefix, active_instance_id=active_instance_id, lr=1e-2, # lr=5e-2, N_optim_step=500, adjust_lr_per_step=0, optim_batch_size=1024, # optim_batch_size=2048, # optim_batch_size=4096, # use_amp=False, use_amp=True, optimize_light_env=True, optimize_appearance_code=config.get("optimize_appearance_code", False), mask_per_object=False, bbox_ray_intersect=True, bbox_enlarge=0.1, optimize_option=[ "keypoint_mask", "photometric_loss", # "perceptual_loss", "z_axis_align_loss", "object_room_wall_attach", "object_room_floor_attach", "physical_violation", # "physical_violation_delayed_start", "object_object_attach", "viewing_constraint", "optimize_exposure", "regenerate_relation_during_test", # "visualize_pred", # "print_loss_dict", ], ) # room_optimizer.set_sampling_mask_from_seg( # seg_mask=None, # seg_mask_path=config.seg_mask_path, # # add_noise_to_seg=0, # add_noise_to_seg=5, # dilate mask # convert_seg_mask_to_box_mask=True, # # convert_seg_mask_to_box_mask=False, # ) room_optimizer.set_sampling_mask_from_seg( # seg_mask=torch.ones_like(input_rgb[:, :, 0]).numpy() * 31, seg_mask_path=config.seg_mask_path, # add_noise_to_seg=0, add_noise_to_seg=5, # dilate mask convert_seg_mask_to_box_mask=False, # convert_seg_mask_to_box_mask=False, ) if "obj_prediction_json" in config: room_optimizer.set_initial_pose_from_prediction(config["obj_prediction_json"]) else: room_optimizer.set_initial_object_poses_from_scene_meta() # dump config config["optimize_option"] = room_optimizer.optimize_option OmegaConf.save( config=config, f=os.path.join(room_optimizer.output_path, "optim_config_full.yaml"), ) room_optimizer.generate_relation() real_room_loc = read_real_scene_localization_with_name("arrangement3") # get file stem file_stem = os.path.splitext(os.path.basename(image_path))[0] pose = real_room_loc[file_stem]["pose_slam_Twc"] pose = np.array(pose) room_optimizer.optimize( input_rgb, pose=pose, ) if __name__ == "__main__": """ Usage: python test/test_optim_pano.py config=test/config/ig_bedroom.yml "img_wh=[320,180]" prefix=dbg_bedroom_conf """
config = read_testing_config()
3
2023-10-15 08:41:29+00:00
24k
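The values above, from the test-script path down to "24k", belong to one complete record. A minimal sketch of inspecting such a record from a JSON Lines dump follows; the file name and every field key used below are assumptions about the serialization, not confirmed names.

import json

DUMP_PATH = "records.jsonl"  # hypothetical path to the dump

with open(DUMP_PATH) as f:
    record = json.loads(f.readline())

# Print the keys the record actually has rather than trusting a fixed schema.
print(sorted(record.keys()))

# Assumed keys: the retrieved context snippets, the code prefix to complete,
# and the gold next line (shown above as `config = read_testing_config()`).
context = record.get("context", [])
prefix = record.get("cropped_code", "")
next_line = record.get("next_line", "")
print(f"{len(context)} context snippets, prefix of {len(prefix)} characters")
print("gold next line:", next_line)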
WenzhengZhang/Seq2seqCoref
main_trainer.py
[ { "identifier": "DataArguments", "path": "arguments.py", "snippet": "class DataArguments:\n data_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Path to data directory\"}\n )\n\n max_train_len: Optional[int] = field(\n default=1536,\n metadata={\n \"help\": \"maximum train source input length\"\n },\n )\n max_train_len_out: Optional[int] = field(\n default=2048,\n metadata={\n \"help\": \"maximum train target decoder length\"\n },\n )\n max_eval_len: Optional[int] = field(\n default=1536,\n metadata={\n \"help\": \"maximum dev/test source input length\"\n },\n )\n max_eval_len_out: Optional[int] = field(\n default=2048,\n metadata={\n \"help\": \"maximum dev/test target decode length\"\n },\n )\n\n data_cache_dir: Optional[str] = field(\n default=None, metadata={\n \"help\": \"Where do you want to store the data downloaded from huggingface\"}\n )\n\n beam_sz: Optional[int] = field(\n default=4, metadata={\n \"help\": \"num beams\"\n }\n )\n\n oracle_mentions_dir: Optional[str] = field(\n default=None, metadata={\n \"help\": \"oracle mentions directory\"\n }\n )\n language: Optional[str] = field(\n default='english', metadata={\n \"help\": \"coreference language\"\n }\n )\n joint_data_dirs: Optional[str] = field(\n default=None, metadata={\"help\": \"datasets dirs for joint training\"}\n )\n joint_max_train_lens: Optional[str] = field(\n default=None, metadata={\"help\": \"max train len for each dataset for \"\n \"joint training\"}\n )\n joint_max_eval_lens: Optional[str] = field(\n default=None, metadata={\"help\": \"max eval len for each dataset for \"\n \"joint training\"}\n )\n joint_num_samples: Optional[int] = field(\n default=2000, metadata={\"help\": \"num samples to subsample for joint \"\n \"training\"}\n )" }, { "identifier": "ModelArguments", "path": "arguments.py", "snippet": "class ModelArguments:\n model_name_or_path: str = field(\n default=\"t5-base\",\n metadata={\n \"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n\n config_name: Optional[str] = field(\n default=None, metadata={\n \"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\n \"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None, metadata={\n \"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n\n decay_rate: Optional[float] = field(\n default=0.6, metadata={\"help\": \"Decay learning rate\"}\n )\n low_cpu_mem_usage: Optional[bool] = field(\n default=False, metadata={\"help\": \"low cpu mem usage when load model\"}\n )" }, { "identifier": "CorefTrainingArguments", "path": "arguments.py", "snippet": "class CorefTrainingArguments(Seq2SeqTrainingArguments):\n do_train: bool = field(default=True,\n metadata={\"help\": \"Whether to run training.\"})\n save_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Path to save predicts directory\"}\n )\n save_predicts: Optional[bool] = field(\n default=True, metadata={\"help\": \"whether to save predictions\"}\n )\n mark_sentence: Optional[bool] = field(\n default=False, metadata={\"help\": \"mark sentence end for short target?\"}\n )\n align_mode: Optional[str] = field(\n default='l', metadata={\"help\": \"alignment mode: highroad (h) or \"\n \"lowroad (l) \"}\n )\n optim: Union[OptimizerNames, str] = field(\n default=\"adamw_apex_fused\",\n metadata={\"help\": \"The optimizer to use.\"},\n )\n 
parallelize_model: Optional[bool] = field(\n default=False, metadata={\"help\": \"whether to enable naive model \"\n \"parallel\"}\n )\n manual_empty_cache: Optional[bool] = field(\n default=False, metadata={\"help\": \"whether to empty cuda cache manually\"}\n )\n is_stage3: Optional[bool] = field(\n default=False, metadata={\"help\": \"use deepspeed stage3 for inference \"\n \"if is stage3\"}\n )\n val_after_train: Optional[bool] = field(\n default=False, metadata={\"help\": \"save the checkpoints then do \"\n \"validation after training\"}\n )\n allow_singletons: Optional[bool] = field(\n default=False, metadata={\n \"help\": \"whether to allow singletons\"\n }\n )\n seq2seq_type: Optional[str] = field(\n default='action', metadata={\n \"help\": \"seq2seq type: action, short_seq, full_seq, tagging, \"\n \"input_feed, action_non_int\"\n }\n )\n action_type: Optional[str] = field(\n default='integer', metadata={\n \"help\": \"target action type: integer, non_integer\"\n }\n )\n do_oracle: Optional[bool] = field(\n default=False, metadata={\n \"help\": \"do oracle experiments or not. Provide (gold) mentions \"\n \"and ask the model to predict coreference predictions\"\n }\n )\n add_mention_end: Optional[bool] = field(\n default=False, metadata={\n \"help\": \"add mention end token when using non-integer action format\"\n }\n )\n joint_data_names: Optional[str] = field(\n default=None, metadata={\"help\": \"datasets names for joint training\"}\n )\n joint_min_num_mentions: Optional[str] = field(\n default=None, metadata={\"help\": \"threshold for num mentions per epoch \"\n \"in joint training for each dataset\"}\n )\n min_num_mentions: Optional[int] = field(\n default=2, metadata={\"help\": \"minimum number of mentions per cluster,\"\n \"ontonotes is 2 other datasets is 1 \"\n \"(allow singletons)\"}\n )\n joint_train: Optional[bool] = field(\n default=False, metadata={\"help\": \"whether to use joint training\"}\n )" }, { "identifier": "CorefDataset", "path": "data.py", "snippet": "class CorefDataset(Dataset):\n\n def __init__(self, tokenizer,\n data_args, train_args, split):\n self.tokenizer = tokenizer\n self.data_args = data_args\n self.train_args = train_args\n self.split = split\n # self.task_prefix = self.data_args.task_prefix\n # convert tokens to ids for each sample\n self.samples, self.doc_labels = self.load_dataset()\n\n def __len__(self):\n return len(self.samples)\n\n def load_dataset(self):\n max_len = self.data_args.max_train_len if self.split == 'train' else \\\n self.data_args.max_eval_len\n data_path = os.path.join(\n self.data_args.data_dir,\n f'{self.split}.t5-small.english.{max_len}.jsonlines')\n samples = []\n doc_labels = {}\n thred = self.train_args.min_num_mentions\n with open(data_path, 'r') as f:\n for line in f:\n item = json.loads(line)\n doc_key = item['doc_key']\n doc_id = re.sub(r'_\\d+$', '', doc_key)\n if self.train_args.action_type == \"integer\":\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item['target_sentence'])\n elif self.train_args.action_type == \"non_integer\":\n if self.train_args.add_mention_end:\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_mention_end_sentence\"])\n else:\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_sentence\"])\n else:\n raise ValueError(f\"wrong action type \"\n f\"{self.train_args.action_type}\")\n\n if self.train_args.seq2seq_type == 'action' or \\\n self.train_args.seq2seq_type == 'input_feed':\n if self.train_args.action_type == 'integer':\n 
target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_action'])\n elif self.train_args.action_type == 'non_integer':\n if self.train_args.add_mention_end:\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_mention_end_action\"])\n else:\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_action\"])\n else:\n raise ValueError(\"wrong action type (\"\n \"integer/non_integer)\")\n elif self.train_args.seq2seq_type == 'short_seq':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_short_sentence'])\n elif self.train_args.seq2seq_type == 'full_seq':\n target_seq = deepcopy(target_sent)\n elif self.train_args.seq2seq_type == 'tagging':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_action'])\n # set the last token as eos token\n target_seq[-1] = self.tokenizer.eos_token_id\n else:\n raise ValueError('wrong seq2seq type')\n sample = {'doc_key': doc_key,\n 'sentence': self.tokenizer.convert_tokens_to_ids(\n item['sentence']),\n 'target_sentence': target_sent,\n 'target_seq': target_seq,\n 'subtoken_map': item['subtoken_map'],\n 'seg_clusters': [[tuple(m) for m in c] for c in item[\n 'seg_clusters'] if len(c) >= thred],\n 'offset': item['offset']\n }\n doc_labels[doc_id] = [[tuple(m) for m in c] for c in item[\n 'gold_clusters']]\n samples.append(sample)\n return samples, doc_labels\n\n def __getitem__(self, index):\n sample = self.samples[index]\n input_ids = torch.tensor(sample['sentence'], dtype=torch.long)\n if self.train_args.seq2seq_type == 'action' or \\\n self.train_args.seq2seq_type == 'input_feed':\n label_ids = torch.tensor(sample['target_sentence'],\n dtype=torch.long)\n target_ids = torch.tensor(sample['target_seq'], dtype=torch.long)\n input_len, tgt_len = input_ids.size(0), label_ids.size(0)\n attention_mask = torch.tensor([1] * input_len, dtype=torch.long)\n src_encoding = {'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'decoder_labels': label_ids,\n 'labels': target_ids\n }\n else:\n label_ids = torch.tensor(sample['target_seq'],\n dtype=torch.long)\n input_len, tgt_len = input_ids.size(0), label_ids.size(0)\n attention_mask = torch.tensor([1] * input_len, dtype=torch.long)\n src_encoding = {'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'labels': label_ids,\n }\n return src_encoding" }, { "identifier": "JointDataset", "path": "data.py", "snippet": "class JointDataset(Dataset):\n\n def __init__(self, tokenizer,\n data_args, train_args, split):\n self.tokenizer = tokenizer\n self.data_args = data_args\n self.train_args = train_args\n self.split = split\n self.all_samples, self.doc_labels, self.id_to_name = self.load_dataset()\n self.samples = None if self.split == 'train' else [\n s for data_samples in self.all_samples.values() for s in\n data_samples\n ]\n\n def __len__(self):\n if self.split == 'train':\n num_samples = 0\n for s in self.all_samples.values():\n num_samples += min(self.data_args.joint_num_samples, len(s))\n else:\n num_samples = len(self.samples)\n return num_samples\n\n def set_samples(self, epoch):\n # subsample larger datasets and then concat them\n sample_seed = self.train_args.seed + epoch\n min_num_samples = min(len(s) for s in self.all_samples.values())\n samples = []\n for data_name, data_samples in self.all_samples.items():\n if len(data_samples) > min_num_samples:\n subsamples = random.Random(sample_seed).sample(\n data_samples, self.data_args.joint_num_samples)\n else:\n subsamples = data_samples\n samples += subsamples\n 
self.samples = samples\n\n def _load_single_data(self, data_dir,\n data_name,\n max_len,\n thred):\n\n samples = []\n doc_labels = {}\n id_to_name = {}\n data_path = os.path.join(\n data_dir,\n f'{self.split}.t5-small.english.{max_len}.jsonlines')\n with open(data_path, 'r') as f:\n for line in f:\n item = json.loads(line)\n doc_key = item['doc_key']\n doc_id = re.sub(r'_\\d+$', '', doc_key)\n id_to_name[doc_id] = data_name\n if self.train_args.action_type == \"integer\":\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item['target_sentence'])\n elif self.train_args.action_type == \"non_integer\":\n if self.train_args.add_mention_end:\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_mention_end_sentence\"])\n else:\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_sentence\"])\n else:\n raise ValueError(f\"wrong action type \"\n f\"{self.train_args.action_type}\")\n\n if self.train_args.seq2seq_type == 'action' or \\\n self.train_args.seq2seq_type == 'input_feed':\n if self.train_args.action_type == 'integer':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_action'])\n elif self.train_args.action_type == 'non_integer':\n if self.train_args.add_mention_end:\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_mention_end_action\"])\n else:\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_action\"])\n else:\n raise ValueError(\"wrong action type (\"\n \"integer/non_integer)\")\n elif self.train_args.seq2seq_type == 'short_seq':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_short_sentence'])\n elif self.train_args.seq2seq_type == 'full_seq':\n target_seq = deepcopy(target_sent)\n elif self.train_args.seq2seq_type == 'tagging':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_action'])\n # set the last token as eos token\n target_seq[-1] = self.tokenizer.eos_token_id\n else:\n raise ValueError('wrong seq2seq type')\n sample = {'doc_key': doc_key,\n 'sentence': self.tokenizer.convert_tokens_to_ids(\n item['sentence']),\n 'target_sentence': target_sent,\n 'target_seq': target_seq,\n 'subtoken_map': item['subtoken_map'],\n 'seg_clusters': [[tuple(m) for m in c] for c in item[\n 'seg_clusters'] if len(c) >= thred],\n 'offset': item['offset']\n }\n doc_labels[doc_id] = [[tuple(m) for m in c] for c in item[\n 'gold_clusters']]\n samples.append(sample)\n return samples, doc_labels, id_to_name\n\n def load_dataset(self):\n doc_labels = {}\n id_to_name = {}\n samples = {}\n max_lens = self.data_args.joint_max_train_lens.split(\n ',') if self.split == 'train' else \\\n self.data_args.joint_max_eval_lens.split(',')\n max_lens = [int(l) for l in max_lens]\n threds = self.train_args.joint_min_num_mentions.split(',')\n threds = [int(t) for t in threds]\n data_dirs = self.data_args.joint_data_dirs.split(',')\n data_names = self.train_args.joint_data_names.split(',')\n for data_dir, data_name, max_len, thred in zip(\n data_dirs, data_names, max_lens, threds):\n single_samples, single_doc_labels, single_id_to_name = \\\n self._load_single_data(data_dir, data_name, max_len, thred)\n samples[data_name] = single_samples\n doc_labels.update(single_doc_labels)\n id_to_name.update(single_id_to_name)\n return samples, doc_labels, id_to_name\n\n def __getitem__(self, index):\n sample = self.samples[index]\n input_ids = torch.tensor(sample['sentence'], dtype=torch.long)\n if self.train_args.seq2seq_type == 'action' or \\\n 
self.train_args.seq2seq_type == 'input_feed':\n label_ids = torch.tensor(sample['target_sentence'],\n dtype=torch.long)\n target_ids = torch.tensor(sample['target_seq'], dtype=torch.long)\n input_len, tgt_len = input_ids.size(0), label_ids.size(0)\n attention_mask = torch.tensor([1] * input_len, dtype=torch.long)\n src_encoding = {'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'decoder_labels': label_ids,\n 'labels': target_ids\n }\n else:\n label_ids = torch.tensor(sample['target_seq'],\n dtype=torch.long)\n input_len, tgt_len = input_ids.size(0), label_ids.size(0)\n attention_mask = torch.tensor([1] * input_len, dtype=torch.long)\n src_encoding = {'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'labels': label_ids,\n }\n return src_encoding" }, { "identifier": "SPEAKER_START", "path": "constants.py", "snippet": "SPEAKER_START = '<speaker>'" }, { "identifier": "SPEAKER_END", "path": "constants.py", "snippet": "SPEAKER_END = '</speaker>'" }, { "identifier": "MENTION_START", "path": "constants.py", "snippet": "MENTION_START = '<m>'" }, { "identifier": "MENTION_END", "path": "constants.py", "snippet": "MENTION_END = '</m>'" }, { "identifier": "COPY", "path": "constants.py", "snippet": "COPY = '<copy>'" }, { "identifier": "CLUSTER_NEW", "path": "constants.py", "snippet": "CLUSTER_NEW = '</new>'" }, { "identifier": "CLUSTERS", "path": "constants.py", "snippet": "CLUSTERS = []" }, { "identifier": "SENTENCE_START", "path": "constants.py", "snippet": "SENTENCE_START = '<sentence>'" }, { "identifier": "SENTENCE_END", "path": "constants.py", "snippet": "SENTENCE_END = '</sentence>'" }, { "identifier": "SPECIAL_IDS", "path": "constants.py", "snippet": "SPECIAL_IDS = {\n 'speaker_start': int_tokenizer.encode(SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end': int_tokenizer.encode(SPEAKER_END, add_special_tokens=False)[\n 0],\n 'mention_start': int_tokenizer.encode(MENTION_START,\n add_special_tokens=False)[0],\n 'mention_end': int_tokenizer.encode(MENTION_END, add_special_tokens=False)[\n 0],\n 'sep': int_tokenizer.encode(SEP_TOKEN, add_special_tokens=False)[0],\n 'copy': int_tokenizer.encode(COPY, add_special_tokens=False)[0],\n 'eos': int_tokenizer.eos_token_id\n}" }, { "identifier": "NON_INT_SPECIAL_IDS", "path": "constants.py", "snippet": "NON_INT_SPECIAL_IDS = {\n 'speaker_start': non_int_tokenizer.encode(\n SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end':\n non_int_tokenizer.encode(\n SPEAKER_END, add_special_tokens=False)[0],\n 'mention_start': non_int_tokenizer.encode(\n MENTION_START,\n add_special_tokens=False)[0],\n 'cluster_ids': MENTION_ENDS_IDS,\n 'cluster_ids_to_num': END_IDS_TO_NUM,\n 'cluster_new': non_int_tokenizer.encode(\n CLUSTER_NEW,\n add_special_tokens=False)[0],\n 'copy': non_int_tokenizer.encode(\n COPY, add_special_tokens=False)[0],\n 'eos': non_int_tokenizer.eos_token_id\n}" }, { "identifier": "MARK_SPECIAL_IDS", "path": "constants.py", "snippet": "MARK_SPECIAL_IDS = deepcopy(SPECIAL_IDS)" }, { "identifier": "MENTION_END_NON_INT_SPECIAL_IDS", "path": "constants.py", "snippet": "MENTION_END_NON_INT_SPECIAL_IDS = {\n 'speaker_start': mention_end_non_int_tokenizer.encode(\n SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end':\n mention_end_non_int_tokenizer.encode(\n SPEAKER_END, add_special_tokens=False)[0],\n 'mention_start': mention_end_non_int_tokenizer.encode(\n MENTION_START,\n add_special_tokens=False)[0],\n 'mention_end': mention_end_non_int_tokenizer.encode(\n MENTION_END,\n 
add_special_tokens=False)[0],\n 'cluster_ids': CLUSTER_IDS,\n 'cluster_ids_to_num': CLUSTER_IDS_TO_NUM,\n 'cluster_new': mention_end_non_int_tokenizer.encode(\n CLUSTER_NEW,\n add_special_tokens=False)[0],\n 'copy': mention_end_non_int_tokenizer.encode(\n COPY, add_special_tokens=False)[0],\n 'eos': mention_end_non_int_tokenizer.eos_token_id\n}" }, { "identifier": "MENTION_ENDS", "path": "constants.py", "snippet": "MENTION_ENDS = []" }, { "identifier": "CorefTrainer", "path": "trainer.py", "snippet": "class CorefTrainer(Seq2SeqTrainer):\n\n def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:\n if self.args.save_total_limit is None or self.args.save_total_limit <= 0:\n return\n\n # Check if we should delete older checkpoint(s)\n checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime,\n output_dir=output_dir)\n if self.args.val_after_train and self.args.eval_delay < \\\n self.state.global_step:\n for checkpoint in checkpoints_sorted[:-1]:\n states_dir = [str(x) for x in Path(\n checkpoint).glob(f'global_step*') if os.path.isdir(x)]\n for state_dir in states_dir:\n logger.info(f\"Deleting optimizer states of saved \"\n f\"checkpoint {checkpoint}\")\n if os.path.exists(state_dir) and os.path.isdir(\n state_dir):\n shutil.rmtree(state_dir)\n else:\n if len(checkpoints_sorted) <= self.args.save_total_limit:\n return\n\n # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which\n # we don't do to allow resuming.\n save_total_limit = self.args.save_total_limit\n if (\n self.state.best_model_checkpoint is not None\n and self.args.save_total_limit == 1\n and checkpoints_sorted[\n -1] != self.state.best_model_checkpoint\n ):\n save_total_limit = 2\n\n number_of_checkpoints_to_delete = max(0, len(\n checkpoints_sorted) - save_total_limit)\n checkpoints_to_be_deleted = checkpoints_sorted[\n :number_of_checkpoints_to_delete]\n for checkpoint in checkpoints_to_be_deleted:\n logger.info(\n f\"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit\")\n shutil.rmtree(checkpoint)\n\n def _save(self, output_dir: Optional[str] = None, state_dict=None):\n # If we are executing this function, we are the process zero, so we don't check for that.\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n logger.info(f\"Saving model checkpoint to {output_dir}\")\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n if not isinstance(self.model, PreTrainedModel) and not hasattr(\n self.model, 'save_pretrained'):\n if state_dict is None:\n state_dict = self.model.state_dict()\n\n if isinstance(unwrap_model(self.model), PreTrainedModel):\n unwrap_model(self.model).save_pretrained(\n output_dir, state_dict=state_dict,\n # safe_serialization=self.args.save_safetensors\n )\n else:\n logger.info(\n \"Trainer.model is not a `PreTrainedModel`, only saving its state dict.\")\n # if self.args.save_safetensors:\n # safetensors.torch.save_file(state_dict,\n # os.path.join(output_dir,\n # SAFE_WEIGHTS_NAME))\n # else:\n torch.save(state_dict, os.path.join(output_dir,\n WEIGHTS_NAME))\n else:\n self.model.save_pretrained(\n output_dir, state_dict=state_dict,\n # safe_serialization=self.args.save_safetensors\n )\n\n if self.tokenizer is not None:\n self.tokenizer.save_pretrained(output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(self.args, 
os.path.join(output_dir, TRAINING_ARGS_NAME))\n\n def _inner_training_loop(\n self, batch_size=None, args=None, resume_from_checkpoint=None,\n trial=None, ignore_keys_for_eval=None\n ):\n self._train_batch_size = batch_size\n # Data loader and number of training steps\n train_dataloader = self.get_train_dataloader()\n\n # Setting up training control variables:\n # number of training epochs: num_train_epochs\n # number of training steps per epoch: num_update_steps_per_epoch\n # total number of training steps to execute: max_steps\n total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size\n\n len_dataloader = None\n if has_length(train_dataloader):\n len_dataloader = len(train_dataloader)\n num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps\n num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)\n num_examples = self.num_examples(train_dataloader)\n if args.max_steps > 0:\n max_steps = args.max_steps\n num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(\n args.max_steps % num_update_steps_per_epoch > 0\n )\n # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's\n # the best we can do.\n num_train_samples = args.max_steps * total_train_batch_size\n else:\n max_steps = math.ceil(\n args.num_train_epochs * num_update_steps_per_epoch)\n num_train_epochs = math.ceil(args.num_train_epochs)\n num_train_samples = self.num_examples(\n train_dataloader) * args.num_train_epochs\n elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size\n max_steps = args.max_steps\n # Setting a very large number of epochs so we go as many times as necessary over the iterator.\n num_train_epochs = sys.maxsize\n num_update_steps_per_epoch = max_steps\n num_examples = total_train_batch_size * args.max_steps\n num_train_samples = args.max_steps * total_train_batch_size\n else:\n raise ValueError(\n \"args.max_steps must be set to a positive value if dataloader does not have a length, was\"\n f\" {args.max_steps}\"\n )\n\n if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:\n if self.args.n_gpu > 1:\n # nn.DataParallel(model) replicates the model, creating new variables and module\n # references registered here no longer work on other gpus, breaking the module\n raise ValueError(\n \"Currently --debug underflow_overflow is not supported under DP. 
Please use DDP\"\n \" (torch.distributed.launch).\"\n )\n else:\n debug_overflow = DebugUnderflowOverflow(self.model) # noqa\n\n delay_optimizer_creation = (\n self.sharded_ddp is not None\n and self.sharded_ddp != ShardedDDPOption.SIMPLE\n or is_sagemaker_mp_enabled()\n or self.fsdp is not None\n )\n if args.deepspeed:\n deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(\n self, num_training_steps=max_steps,\n resume_from_checkpoint=resume_from_checkpoint\n )\n self.model = deepspeed_engine.module\n self.model_wrapped = deepspeed_engine\n self.deepspeed = deepspeed_engine\n self.optimizer = optimizer\n self.lr_scheduler = lr_scheduler\n elif not delay_optimizer_creation:\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n self.state = TrainerState()\n self.state.is_hyper_param_search = trial is not None\n\n # Activate gradient checkpointing if needed\n if args.gradient_checkpointing:\n self.model.gradient_checkpointing_enable()\n\n model = self._wrap_model(self.model_wrapped)\n\n if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None:\n self._load_from_checkpoint(resume_from_checkpoint, model)\n\n # for the rest of this function `model` is the outside model, whether it was wrapped or not\n if model is not self.model:\n self.model_wrapped = model\n\n if delay_optimizer_creation:\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n # Check if saved optimizer or scheduler states exist\n self._load_optimizer_and_scheduler(resume_from_checkpoint)\n\n # important: at this point:\n # self.model is the Transformers Model\n # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {num_examples}\")\n logger.info(f\" Num Epochs = {num_train_epochs}\")\n logger.info(\n f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n logger.info(\n f\" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}\")\n logger.info(\n f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {max_steps}\")\n logger.info(\n f\" Number of trainable parameters = {sum(p.numel() for p in model.parameters() if p.requires_grad)}\"\n )\n\n self.state.epoch = 0\n start_time = time.time()\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n steps_trained_progress_bar = None\n\n # Check if continuing training from a checkpoint\n if resume_from_checkpoint is not None and os.path.isfile(\n os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)\n ):\n self.state = TrainerState.load_from_json(\n os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))\n epochs_trained = self.state.global_step // num_update_steps_per_epoch\n if not args.ignore_data_skip:\n steps_trained_in_current_epoch = self.state.global_step % (\n num_update_steps_per_epoch)\n steps_trained_in_current_epoch *= args.gradient_accumulation_steps\n else:\n steps_trained_in_current_epoch = 0\n\n logger.info(\n \" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(f\" Continuing training from epoch {epochs_trained}\")\n logger.info(\n f\" Continuing training from global step {self.state.global_step}\")\n if not args.ignore_data_skip:\n logger.info(\n f\" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} \"\n \"batches in the first epoch. 
If this takes a lot of time, you can add the `--ignore_data_skip` \"\n \"flag to your launch command, but you will resume the training on data already seen by your model.\"\n )\n if self.is_local_process_zero() and not args.disable_tqdm:\n steps_trained_progress_bar = tqdm(\n total=steps_trained_in_current_epoch)\n steps_trained_progress_bar.set_description(\n \"Skipping the first batches\")\n\n # Update the references\n self.callback_handler.model = self.model\n self.callback_handler.optimizer = self.optimizer\n self.callback_handler.lr_scheduler = self.lr_scheduler\n self.callback_handler.train_dataloader = train_dataloader\n if self.hp_name is not None and self._trial is not None:\n # use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing trial\n # parameter to Train when using DDP.\n self.state.trial_name = self.hp_name(self._trial)\n if trial is not None:\n assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial\n self.state.trial_params = hp_params(assignments)\n else:\n self.state.trial_params = None\n # This should be the same if the state has been saved but in case the training arguments changed, it's safer\n # to set this after the load.\n self.state.max_steps = max_steps\n self.state.num_train_epochs = num_train_epochs\n self.state.is_local_process_zero = self.is_local_process_zero()\n self.state.is_world_process_zero = self.is_world_process_zero()\n\n # tr_loss is a tensor to avoid synchronization of TPUs through .item()\n tr_loss = torch.tensor(0.0).to(args.device)\n # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses\n self._total_loss_scalar = 0.0\n self._globalstep_last_logged = self.state.global_step\n model.zero_grad()\n\n self.control = self.callback_handler.on_train_begin(args, self.state,\n self.control)\n\n # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.\n if not args.ignore_data_skip:\n for epoch in range(epochs_trained):\n is_random_sampler = hasattr(train_dataloader,\n \"sampler\") and isinstance(\n train_dataloader.sampler, RandomSampler\n )\n if is_torch_less_than_1_11 or not is_random_sampler:\n # We just need to begin an iteration to create the randomization of the sampler.\n # That was before PyTorch 1.11 however...\n if self.args.joint_train:\n train_dataloader.dataset.set_samples(epoch)\n for _ in train_dataloader:\n break\n else:\n # Otherwise we need to call the whooooole sampler cause there is some random operation added\n # AT THE VERY END!\n _ = list(train_dataloader.sampler)\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n for epoch in range(epochs_trained, num_train_epochs):\n if self.args.joint_train:\n train_dataloader.dataset.set_samples(epoch)\n if isinstance(train_dataloader, DataLoader) and isinstance(\n train_dataloader.sampler, DistributedSampler):\n train_dataloader.sampler.set_epoch(epoch)\n elif hasattr(train_dataloader, \"dataset\") and isinstance(\n train_dataloader.dataset, IterableDatasetShard):\n train_dataloader.dataset.set_epoch(epoch)\n\n if is_torch_tpu_available():\n parallel_loader = pl.ParallelLoader(train_dataloader, [\n args.device]).per_device_loader(args.device)\n epoch_iterator = parallel_loader\n else:\n epoch_iterator = train_dataloader\n\n # Reset the past mems state at the beginning of each epoch if necessary.\n if args.past_index >= 0:\n self._past = None\n\n steps_in_epoch = (\n len(epoch_iterator)\n if 
len_dataloader is not None\n else args.max_steps * args.gradient_accumulation_steps\n )\n self.control = self.callback_handler.on_epoch_begin(args,\n self.state,\n self.control)\n\n if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0:\n self._load_rng_state(resume_from_checkpoint)\n\n step = -1\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n for step, inputs in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n if steps_trained_progress_bar is not None:\n steps_trained_progress_bar.update(1)\n if steps_trained_in_current_epoch == 0:\n self._load_rng_state(resume_from_checkpoint)\n continue\n elif steps_trained_progress_bar is not None:\n steps_trained_progress_bar.close()\n steps_trained_progress_bar = None\n\n if step % args.gradient_accumulation_steps == 0:\n self.control = self.callback_handler.on_step_begin(args,\n self.state,\n self.control)\n # if args.manual_empty_cache:\n # torch.cuda.empty_cache()\n if (\n ((step + 1) % args.gradient_accumulation_steps != 0)\n and args.local_rank != -1\n and args._no_sync_in_gradient_accumulation\n ):\n # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.\n with model.no_sync():\n tr_loss_step = self.training_step(model, inputs)\n else:\n tr_loss_step = self.training_step(model, inputs)\n\n if (\n args.logging_nan_inf_filter\n and not is_torch_tpu_available()\n and (\n torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))\n ):\n # if loss is nan or inf simply add the average of previous logged losses\n tr_loss += tr_loss / (\n 1 + self.state.global_step - self._globalstep_last_logged)\n else:\n tr_loss += tr_loss_step\n\n self.current_flos += float(self.floating_point_ops(inputs))\n\n # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps\n if self.deepspeed:\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n self.deepspeed.step()\n\n if (step + 1) % args.gradient_accumulation_steps == 0 or (\n # last step in epoch but step is always smaller than gradient_accumulation_steps\n steps_in_epoch <= args.gradient_accumulation_steps\n and (step + 1) == steps_in_epoch\n ):\n # Gradient clipping\n if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:\n # deepspeed does its own clipping\n\n if self.do_grad_scaling:\n # Reduce gradients first for XLA\n if is_torch_tpu_available():\n gradients = xm._fetch_gradients(self.optimizer)\n xm.all_reduce(\"sum\", gradients,\n scale=1.0 / xm.xrt_world_size())\n # AMP: gradients need unscaling\n self.scaler.unscale_(self.optimizer)\n\n if is_sagemaker_mp_enabled() and args.fp16:\n self.optimizer.clip_master_grads(args.max_grad_norm)\n elif hasattr(self.optimizer, \"clip_grad_norm\"):\n # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping\n self.optimizer.clip_grad_norm(args.max_grad_norm)\n elif hasattr(model, \"clip_grad_norm_\"):\n # Some models (like FullyShardedDDP) have a specific way to do gradient clipping\n model.clip_grad_norm_(args.max_grad_norm)\n else:\n # Revert to normal clipping otherwise, handling Apex or full precision\n nn.utils.clip_grad_norm_(\n amp.master_params(\n self.optimizer) if self.use_apex else model.parameters(),\n args.max_grad_norm,\n )\n\n # Optimizer step\n 
optimizer_was_run = True\n if self.deepspeed:\n pass # called outside the loop\n elif is_torch_tpu_available():\n if self.do_grad_scaling:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n xm.optimizer_step(self.optimizer)\n elif self.do_grad_scaling:\n scale_before = self.scaler.get_scale()\n self.scaler.step(self.optimizer)\n self.scaler.update()\n scale_after = self.scaler.get_scale()\n optimizer_was_run = scale_before <= scale_after\n else:\n self.optimizer.step()\n\n if optimizer_was_run and not self.deepspeed:\n self.lr_scheduler.step()\n\n model.zero_grad()\n self.state.global_step += 1\n self.state.epoch = epoch + (step + 1) / steps_in_epoch\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n self.control = self.callback_handler.on_step_end(args,\n self.state,\n self.control)\n\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch,\n ignore_keys_for_eval)\n else:\n self.control = self.callback_handler.on_substep_end(args,\n self.state,\n self.control)\n\n if self.control.should_epoch_stop or self.control.should_training_stop:\n break\n if step < 0:\n logger.warning(\n \"There seems to be not a single sample in your epoch_iterator, stopping training at step\"\n f\" {self.state.global_step}! This is expected if you're using an IterableDataset and set\"\n f\" num_steps ({max_steps}) higher than the number of available samples.\"\n )\n self.control.should_training_stop = True\n\n self.control = self.callback_handler.on_epoch_end(args, self.state,\n self.control)\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch,\n ignore_keys_for_eval)\n\n if DebugOption.TPU_METRICS_DEBUG in self.args.debug:\n if is_torch_tpu_available():\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n else:\n logger.warning(\n \"You enabled PyTorch/XLA debug metrics but you don't have a TPU \"\n \"configured. Check your training configuration if this is unexpected.\"\n )\n if self.control.should_training_stop:\n break\n\n if args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of training\n delattr(self, \"_past\")\n\n logger.info(\n \"\\n\\nTraining completed. 
Do not forget to share your model on huggingface.co/models =)\\n\\n\")\n if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:\n # Wait for everyone to get here so we are sur the model has been saved by process 0.\n if is_torch_tpu_available():\n xm.rendezvous(\"load_best_model_at_end\")\n elif args.local_rank != -1:\n dist.barrier()\n elif is_sagemaker_mp_enabled():\n smp.barrier()\n\n self._load_best_model()\n\n # add remaining tr_loss\n self._total_loss_scalar += tr_loss.item()\n train_loss = self._total_loss_scalar / self.state.global_step\n\n metrics = speed_metrics(\"train\", start_time,\n num_samples=num_train_samples,\n num_steps=self.state.max_steps)\n self.store_flos()\n metrics[\"total_flos\"] = self.state.total_flos\n metrics[\"train_loss\"] = train_loss\n\n self.is_in_train = False\n\n self._memory_tracker.stop_and_update_metrics(metrics)\n\n self.log(metrics)\n\n run_dir = self._get_output_dir(trial)\n checkpoints_sorted = self._sorted_checkpoints(use_mtime=False,\n output_dir=run_dir)\n\n # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint.\n if self.state.best_model_checkpoint is not None and \\\n self.args.save_total_limit == 1 and self.is_world_process_zero():\n for checkpoint in checkpoints_sorted:\n if checkpoint != self.state.best_model_checkpoint:\n logger.info(\n f\"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit\")\n shutil.rmtree(checkpoint)\n\n self.control = self.callback_handler.on_train_end(args, self.state,\n self.control)\n\n return TrainOutput(self.state.global_step, train_loss, metrics)\n\n def my_compute_metrics(self,\n doc_labels: Dict[str, List[List]],\n predicts: Any,\n samples: List,\n split: str,\n id_to_name: Dict = None\n ) -> Dict:\n if self.args.joint_train:\n data_names = self.args.joint_data_names.split(',')\n joint_threds = [\n int(t) for t in self.args.joint_min_num_mentions.split(',')]\n name_to_threds = {n: t for n, t in zip(data_names, joint_threds)}\n documents_to_chunk_data = defaultdict(list)\n documents_to_chunk_gold = defaultdict(list)\n predictions = {}\n golds = {}\n assert len(samples) == len(predicts)\n out_sents = []\n last_doc_id = re.sub(r'_\\d+$', '', samples[0]['doc_key'])\n for sample, predict in zip(samples, predicts):\n doc_key = sample['doc_key']\n doc_id = re.sub(r'_\\d+$', '', doc_key)\n # require convert to ids first\n input_ids = sample['sentence']\n subtoken_map = sample['subtoken_map']\n offset = sample['offset']\n # remove bos\n predict_ids = predict[1:].tolist()\n gold_data = sample['seg_clusters']\n if self.args.joint_train:\n thred = name_to_threds[id_to_name[doc_id]]\n else:\n thred = self.args.min_num_mentions\n if self.args.seq2seq_type == \"short_seq\":\n special_ids = MARK_SPECIAL_IDS if self.args.mark_sentence \\\n else SPECIAL_IDS\n pred_data, aligned_input_ids, aligned_pred_ids = \\\n parse_short_target_tokens(input_ids, predict_ids,\n special_ids, subtoken_map,\n self.tokenizer,\n self.args.align_mode,\n thred,\n self.args.mark_sentence\n )\n pred_tokens = self.tokenizer.convert_ids_to_tokens(\n predict_ids)\n out_predict = {\n 'doc_key': doc_key,\n 'pred_tokens': pred_tokens,\n 'pred_text': self.tokenizer.convert_tokens_to_string(\n pred_tokens),\n 'pred_aligned_text': self.tokenizer.convert_ids_to_tokens(\n aligned_pred_ids\n ),\n 'input_aligned_text': self.tokenizer.convert_ids_to_tokens(\n aligned_input_ids\n )\n }\n else:\n is_tagging = (self.args.seq2seq_type == 'tagging')\n if self.args.action_type == 
'integer':\n pred_data, pred_token_mentions, predict_ids = \\\n parse_int_output_tokens(\n input_ids,\n predict_ids,\n SPECIAL_IDS,\n subtoken_map,\n self.tokenizer,\n thred, is_tagging)\n else:\n special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \\\n self.args.add_mention_end else NON_INT_SPECIAL_IDS\n pred_data, pred_token_mentions, predict_ids = \\\n parse_nonint_output_tokens(\n input_ids,\n predict_ids,\n special_ids,\n subtoken_map,\n self.tokenizer, self.args.add_mention_end,\n thred)\n pred_token_mentions = [(m[0] + offset, m[1] + offset) for m in\n pred_token_mentions]\n pred_tokens = self.tokenizer.convert_ids_to_tokens(\n predict_ids)\n out_predict = {'doc_key': doc_key,\n 'pred_tokens': pred_tokens,\n 'pred_text':\n self.tokenizer.convert_tokens_to_string(\n pred_tokens),\n 'predict_clusters': pred_data,\n 'gold_clusters': gold_data,\n 'predict_token_mentions': pred_token_mentions\n }\n # list of (m1,m2)\n\n documents_to_chunk_data[doc_id].extend(pred_data)\n documents_to_chunk_gold[doc_id].extend(gold_data)\n\n out_sents.append(out_predict)\n if doc_id != last_doc_id:\n predictions[last_doc_id] = get_document_predicts(\n documents_to_chunk_data[\n last_doc_id])\n golds[last_doc_id] = get_document_predicts(\n documents_to_chunk_gold[\n last_doc_id])\n last_doc_id = doc_id\n # final one\n predictions[last_doc_id] = get_document_predicts(\n documents_to_chunk_data[last_doc_id]\n )\n golds[last_doc_id] = get_document_predicts(\n documents_to_chunk_gold[last_doc_id]\n )\n # print(predictions)\n if self.args.joint_train:\n predictions_list = defaultdict(list)\n labels_list = defaultdict(list)\n golds_list = defaultdict(list)\n else:\n predictions_list = []\n labels_list = []\n golds_list = []\n for document_id, doc_label in doc_labels.items():\n if self.args.joint_train:\n predictions_list[id_to_name[document_id]].append(\n predictions[document_id])\n labels_list[id_to_name[document_id]].append(doc_label)\n golds_list[id_to_name[document_id]].append(golds[document_id])\n else:\n predictions_list.append(predictions[document_id])\n labels_list.append(doc_label)\n golds_list.append(golds[document_id])\n if self.args.joint_train:\n label_results = {}\n gold_results = {}\n for dn in predictions_list.keys():\n metrics = CorefAllMetrics().get_all_metrics(\n labels_list[dn],\n predictions_list[dn])\n metrics_golds = CorefAllMetrics().get_all_metrics(\n golds_list[dn],\n predictions_list[dn])\n single_label_results = {\n f'{dn}_{metric_name}_{x}': v\n for metric_name, metric_values in metrics['micro'].items()\n for x, v in metric_values.items()\n }\n single_gold_results = {\n f'{dn}_gold_{metric_name}_{x}': v\n for metric_name, metric_values in\n metrics_golds['micro'].items()\n for x, v in metric_values.items()\n }\n label_results.update(single_label_results)\n gold_results.update(single_gold_results)\n\n else:\n metrics = CorefAllMetrics().get_all_metrics(labels_list,\n predictions_list)\n metrics_golds = CorefAllMetrics().get_all_metrics(golds_list,\n predictions_list)\n label_results = {\n f'{metric_name}_{x}': v\n for metric_name, metric_values in metrics['micro'].items()\n for x, v in metric_values.items()\n }\n gold_results = {\n f'gold_{metric_name}_{x}': v\n for metric_name, metric_values in metrics_golds['micro'].items()\n for x, v in metric_values.items()\n }\n results = {**label_results, **gold_results}\n if self.args.joint_train:\n avg_f1s = [results[f\"{dname}_average_f1\"] for dname in\n data_names]\n results[\"average_f1\"] = sum(avg_f1s) / len(avg_f1s)\n if 
self.is_world_process_zero() and self.args.save_predicts:\n os.makedirs(self.args.save_dir, exist_ok=True)\n save_path = os.path.join(self.args.save_dir,\n f'{split}-predicts.txt')\n results_path = os.path.join(self.args.save_dir,\n f'{split}-results.json')\n with open(save_path, 'w') as f:\n for p in out_sents:\n f.write('%s\\n' % json.dumps(p))\n with open(results_path, 'w') as f:\n json.dump(results, f)\n\n return results\n\n def evaluation_loop(\n self,\n dataloader: DataLoader,\n description: str,\n prediction_loss_only: Optional[bool] = False,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> EvalLoopOutput:\n \"\"\"\n Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.\n Works both with or without labels.\n \"\"\"\n args = self.args\n\n prediction_loss_only = False\n\n # if eval is called w/o train init deepspeed here\n if args.deepspeed and not self.deepspeed:\n # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval\n # from the checkpoint eventually\n deepspeed_engine, _, _ = deepspeed_init(\n self, num_training_steps=0, resume_from_checkpoint=None,\n inference=is_deepspeed_zero3_enabled()\n )\n self.model = deepspeed_engine.module\n self.model_wrapped = deepspeed_engine\n self.deepspeed = deepspeed_engine\n if self.args.gradient_checkpointing:\n self.model.config.use_cache = True\n model = self._wrap_model(self.model, training=False,\n dataloader=dataloader)\n\n # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called\n # while ``train`` is running, cast it to the right dtype first and then put on device\n if not self.is_in_train:\n if args.fp16_full_eval:\n model = model.to(dtype=torch.float16, device=args.device)\n elif args.bf16_full_eval:\n model = model.to(dtype=torch.bfloat16, device=args.device)\n\n batch_size = self.args.eval_batch_size\n\n logger.info(f\"***** Running {description} *****\")\n if has_length(dataloader):\n logger.info(f\" Num examples = {self.num_examples(dataloader)}\")\n else:\n logger.info(\" Num examples: Unknown\")\n logger.info(f\" Batch size = {batch_size}\")\n\n model.eval()\n\n self.callback_handler.eval_dataloader = dataloader\n # Do this before wrapping.\n eval_dataset = getattr(dataloader, \"dataset\", None)\n\n if is_torch_tpu_available():\n dataloader = pl.ParallelLoader(dataloader,\n [args.device]).per_device_loader(\n args.device)\n\n if args.past_index >= 0:\n self._past = None\n\n # Initialize containers\n # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)\n losses_host = None\n preds_host = None\n labels_host = None\n inputs_host = None\n\n # losses/preds/labels on CPU (final containers)\n all_losses = None\n all_preds = None\n all_labels = None\n all_inputs = None\n # Will be useful when we have an iterable dataset so don't know its length.\n\n observed_num_examples = 0\n # Main evaluation loop\n for step, inputs in enumerate(dataloader):\n # Update the observed num examples\n observed_batch_size = find_batch_size(inputs)\n if observed_batch_size is not None:\n observed_num_examples += observed_batch_size\n # For batch samplers, batch_size is not known by the dataloader in advance.\n if batch_size is None:\n batch_size = observed_batch_size\n\n # Prediction step\n loss, logits, labels = self.prediction_step(model, inputs,\n prediction_loss_only,\n ignore_keys=ignore_keys)\n inputs_decode = self._prepare_input(inputs[\n \"input_ids\"]) if args.include_inputs_for_metrics else 
None\n\n if is_torch_tpu_available():\n xm.mark_step()\n\n # Update containers on host\n if loss is not None:\n losses = self._nested_gather(loss.repeat(batch_size))\n losses_host = losses if losses_host is None else torch.cat(\n (losses_host, losses), dim=0)\n if labels is not None:\n labels = self._pad_across_processes(labels)\n labels = self._nested_gather(labels)\n labels_host = labels if labels_host is None else nested_concat(\n labels_host, labels, padding_index=-100)\n if inputs_decode is not None:\n inputs_decode = self._pad_across_processes(inputs_decode)\n inputs_decode = self._nested_gather(inputs_decode)\n inputs_host = (\n inputs_decode\n if inputs_host is None\n else nested_concat(inputs_host, inputs_decode,\n padding_index=-100)\n )\n if logits is not None:\n logits = self._pad_across_processes(logits)\n logits = self._nested_gather(logits)\n if self.preprocess_logits_for_metrics is not None:\n logits = self.preprocess_logits_for_metrics(logits, labels)\n preds_host = logits if preds_host is None else nested_concat(\n preds_host, logits, padding_index=-100)\n self.control = self.callback_handler.on_prediction_step(args,\n self.state,\n self.control)\n\n # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.\n if args.eval_accumulation_steps is not None and (\n step + 1) % args.eval_accumulation_steps == 0:\n if losses_host is not None:\n losses = nested_numpify(losses_host)\n all_losses = losses if all_losses is None else np.concatenate(\n (all_losses, losses), axis=0)\n if preds_host is not None:\n logits = nested_numpify(preds_host)\n all_preds = logits if all_preds is None else nested_concat(\n all_preds, logits, padding_index=-100)\n if inputs_host is not None:\n inputs_decode = nested_numpify(inputs_host)\n all_inputs = (\n inputs_decode\n if all_inputs is None\n else nested_concat(all_inputs, inputs_decode,\n padding_index=-100)\n )\n if labels_host is not None:\n labels = nested_numpify(labels_host)\n all_labels = (\n labels if all_labels is None else nested_concat(\n all_labels, labels, padding_index=-100)\n )\n\n # Set back to None to begin a new accumulation\n losses_host, preds_host, inputs_host, labels_host = None, None, None, None\n\n if args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of the evaluation loop\n delattr(self, \"_past\")\n\n # Gather all remaining tensors and put them back on the CPU\n if losses_host is not None:\n losses = nested_numpify(losses_host)\n all_losses = losses if all_losses is None else np.concatenate(\n (all_losses, losses), axis=0)\n if preds_host is not None:\n logits = nested_numpify(preds_host)\n all_preds = logits if all_preds is None else nested_concat(\n all_preds, logits, padding_index=-100)\n if inputs_host is not None:\n inputs_decode = nested_numpify(inputs_host)\n all_inputs = (\n inputs_decode if all_inputs is None else nested_concat(\n all_inputs, inputs_decode, padding_index=-100)\n )\n if labels_host is not None:\n labels = nested_numpify(labels_host)\n all_labels = labels if all_labels is None else nested_concat(\n all_labels, labels, padding_index=-100)\n\n # Number of samples\n if has_length(eval_dataset):\n num_samples = len(eval_dataset)\n # The instance check is weird and does not actually check for the type, but whether the dataset has the right\n # methods. 
Therefore we need to make sure it also has the attribute.\n elif isinstance(eval_dataset, IterableDatasetShard) and getattr(\n eval_dataset, \"num_examples\", 0) > 0:\n num_samples = eval_dataset.num_examples\n else:\n if has_length(dataloader):\n num_samples = self.num_examples(dataloader)\n else: # both len(dataloader.dataset) and len(dataloader) fail\n num_samples = observed_num_examples\n if num_samples == 0 and observed_num_examples > 0:\n num_samples = observed_num_examples\n\n # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of\n # samplers has been rounded to a multiple of batch_size, so we truncate.\n if all_losses is not None:\n all_losses = all_losses[:num_samples]\n if all_preds is not None:\n all_preds = nested_truncate(all_preds, num_samples)\n if all_labels is not None:\n all_labels = nested_truncate(all_labels, num_samples)\n if all_inputs is not None:\n all_inputs = nested_truncate(all_inputs, num_samples)\n\n # Metrics!\n doc_labels = eval_dataset.doc_labels\n eval_samples = eval_dataset.samples\n split = eval_dataset.split\n if self.args.joint_train:\n doc_id_to_name = eval_dataset.id_to_name\n else:\n doc_id_to_name = None\n # allow_singletons = eval_dataset.data_args.allow_singletons\n assert all_preds is not None\n metrics = self.my_compute_metrics(doc_labels, all_preds,\n eval_samples, split,\n doc_id_to_name)\n # if all_preds is not None and doc_labels is not None:\n # metrics = self.get_eval_metrics(doc_labels, all_preds,\n # eval_samples, split)\n # else:\n # metrics = {}\n\n # To be JSON-serializable, we need to remove numpy types or zero-d tensors\n metrics = denumpify_detensorize(metrics)\n\n if all_losses is not None:\n metrics[f\"{metric_key_prefix}_loss\"] = all_losses.mean().item()\n\n # Prefix all keys with metric_key_prefix + '_'\n for key in list(metrics.keys()):\n if not key.startswith(f\"{metric_key_prefix}_\"):\n metrics[f\"{metric_key_prefix}_{key}\"] = metrics.pop(key)\n if self.args.gradient_checkpointing:\n self.model.config.use_cache = False\n return EvalLoopOutput(predictions=all_preds, label_ids=all_labels,\n metrics=metrics, num_samples=num_samples)\n\n def prediction_step(\n self,\n model: nn.Module,\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n ignore_keys: Optional[List[str]] = None,\n ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n \"\"\"\n Perform an evaluation step on `model` using `inputs`.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (`nn.Module`):\n The model to evaluate.\n inputs (`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument `labels`. 
Check your model's documentation for all accepted arguments.\n prediction_loss_only (`bool`):\n Whether or not to return the loss only.\n ignore_keys:\n list of ignore keys\n\n Return:\n Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and\n labels (each being optional).\n \"\"\"\n\n if not self.args.predict_with_generate or prediction_loss_only:\n return super().prediction_step(\n model, inputs, prediction_loss_only=prediction_loss_only,\n ignore_keys=ignore_keys\n )\n\n has_labels = \"labels\" in inputs\n inputs = self._prepare_inputs(inputs)\n\n # XXX: adapt synced_gpus for fairscale as well\n gen_kwargs = self._gen_kwargs.copy()\n gen_kwargs[\"max_length\"] = (\n gen_kwargs[\"max_length\"] if gen_kwargs.get(\n \"max_length\") is not None else self.model.config.max_length\n )\n gen_kwargs[\"num_beams\"] = (\n gen_kwargs[\"num_beams\"] if gen_kwargs.get(\n \"num_beams\") is not None else self.model.config.num_beams\n )\n default_synced_gpus = True if is_deepspeed_zero3_enabled() else False\n gen_kwargs[\"synced_gpus\"] = (\n gen_kwargs[\"synced_gpus\"] if gen_kwargs.get(\n \"synced_gpus\") is not None else default_synced_gpus\n )\n\n if \"attention_mask\" in inputs:\n gen_kwargs[\"attention_mask\"] = inputs.get(\"attention_mask\", None)\n if \"global_attention_mask\" in inputs:\n gen_kwargs[\"global_attention_mask\"] = inputs.get(\n \"global_attention_mask\", None)\n\n # prepare generation inputs\n # some encoder-decoder models can have varying encoder's and thus\n # varying model input names\n if hasattr(self.model,\n \"encoder\") and self.model.encoder.main_input_name != self.model.main_input_name:\n generation_inputs = inputs[self.model.encoder.main_input_name]\n else:\n generation_inputs = inputs[self.model.main_input_name]\n # add our logits_processor here\n if self.args.seq2seq_type != 'short_seq':\n if self.args.action_type == 'non_integer':\n special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \\\n self.args.add_mention_end else NON_INT_SPECIAL_IDS\n gen_kwargs['logits_processor'] = LogitsProcessorList(\n [NonIntProcessor(generation_inputs, special_ids,\n self.args.seq2seq_type,\n self.args.add_mention_end)])\n else:\n gen_kwargs['logits_processor'] = LogitsProcessorList(\n [IntProcessor(generation_inputs, SPECIAL_IDS,\n self.args.seq2seq_type)])\n elif self.args.mark_sentence:\n gen_kwargs['logits_processor'] = LogitsProcessorList(\n [ShortSeqProcessor(generation_inputs, MARK_SPECIAL_IDS)])\n # if self.args.use_peft:\n # gen_kwargs[\"input_ids\"] = generation_inputs\n # gen_kwargs[\"use_cache\"] = True\n # generated_tokens = self.model.generate(\n # **gen_kwargs,\n # )\n # else:\n generated_tokens = self.model.generate(\n generation_inputs,\n **gen_kwargs,\n )\n # in case the batch is shorter than max length, the output should be padded\n if generated_tokens.shape[-1] < gen_kwargs[\"max_length\"]:\n generated_tokens = self._pad_tensors_to_max_len(generated_tokens,\n gen_kwargs[\n \"max_length\"])\n\n with torch.no_grad():\n with self.compute_loss_context_manager():\n outputs = model(**inputs)\n if has_labels:\n if self.label_smoother is not None:\n loss = self.label_smoother(outputs,\n inputs[\"labels\"]).mean().detach()\n else:\n loss = (outputs[\"loss\"] if isinstance(outputs, dict) else\n outputs[0]).mean().detach()\n else:\n loss = None\n\n if self.args.prediction_loss_only:\n return (loss, None, None)\n\n if has_labels:\n labels = inputs[\"labels\"]\n if labels.shape[-1] < gen_kwargs[\"max_length\"]:\n labels = 
self._pad_tensors_to_max_len(labels,\n gen_kwargs[\"max_length\"])\n else:\n labels = None\n\n return (loss, generated_tokens, labels)" }, { "identifier": "ConstrainedDataCollator", "path": "data.py", "snippet": "class ConstrainedDataCollator:\n \"\"\"\n Data collator that will dynamically pad the inputs received, as well as the labels.\n\n Args:\n tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):\n The tokenizer used for encoding the data.\n model ([`PreTrainedModel`]):\n The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to\n prepare the *decoder_input_ids*\n\n This is useful when using *label_smoothing* to avoid calculating loss twice.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n is provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n max_length (`int`, *optional*):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (`int`, *optional*):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.5 (Volta).\n label_pad_token_id (`int`, *optional*, defaults to -100):\n The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).\n return_tensors (`str`):\n The type of Tensor to return. 
Allowable values are \"np\", \"pt\" and \"tf\".\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n model: Optional[Any] = None\n padding: Union[bool, str, PaddingStrategy] = True\n max_length: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n label_pad_token_id: int = -100\n return_tensors: str = \"pt\"\n\n def __call__(self, features, return_tensors=None):\n import numpy as np\n\n if return_tensors is None:\n return_tensors = self.return_tensors\n labels = [feature[\"labels\"] for\n feature in features] if \"labels\" in features[\n 0].keys() else None\n decoder_labels = [feature[\"decoder_labels\"] for\n feature in features] if \"decoder_labels\" in features[\n 0].keys() else None\n # We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the\n # same length to return tensors.\n if labels is not None:\n assert decoder_labels is not None\n max_label_length = max(len(l) for l in labels)\n if self.pad_to_multiple_of is not None:\n max_label_length = (\n (max_label_length + self.pad_to_multiple_of - 1)\n // self.pad_to_multiple_of\n * self.pad_to_multiple_of\n )\n\n padding_side = self.tokenizer.padding_side\n for feature in features:\n remainder = [self.label_pad_token_id] * (\n max_label_length - len(feature[\"labels\"]))\n if isinstance(feature[\"labels\"], list):\n feature[\"labels\"] = (\n feature[\n \"labels\"] + remainder if padding_side == \"right\"\n else remainder + feature[\"labels\"]\n )\n feature[\"decoder_labels\"] = (\n feature[\n \"decoder_labels\"] + remainder if padding_side ==\n \"right\"\n else remainder + feature[\"decoder_labels\"]\n )\n elif padding_side == \"right\":\n feature[\"labels\"] = np.concatenate(\n [feature[\"labels\"], remainder]).astype(np.int64)\n feature[\"decoder_labels\"] = np.concatenate(\n [feature[\"decoder_labels\"], remainder]).astype(np.int64)\n else:\n feature[\"labels\"] = np.concatenate(\n [remainder, feature[\"labels\"]]).astype(np.int64)\n feature[\"decoder_labels\"] = np.concatenate(\n [remainder, feature[\"decoder_labels\"]]).astype(np.int64)\n\n features = self.tokenizer.pad(\n features,\n padding=self.padding,\n max_length=self.max_length,\n pad_to_multiple_of=self.pad_to_multiple_of,\n return_tensors=return_tensors,\n )\n\n # prepare decoder_input_ids\n if (\n labels is not None\n and self.model is not None\n and hasattr(self.model, \"prepare_decoder_input_ids_from_labels\")\n ):\n decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(\n labels=features[\"decoder_labels\"])\n features[\"decoder_input_ids\"] = decoder_input_ids\n if self.model.is_input_feed:\n decoder_input_actions = \\\n self.model.prepare_decoder_input_ids_from_labels(\n labels=features[\"labels\"])\n features[\"decoder_input_actions\"] = decoder_input_actions\n del features[\"decoder_labels\"]\n return features" }, { "identifier": "ConstrainedT5", "path": "model.py", "snippet": "class ConstrainedT5(T5ForConditionalGeneration):\n\n def __init__(self, config: T5Config, special_ids: Dict,\n seq2seq_type: str, action_type: str,\n add_mention_end: bool):\n super().__init__(config)\n self.mention_start = special_ids['mention_start']\n self.mention_end = special_ids.get('mention_end',None)\n self.eos_id = special_ids['eos']\n self.action_type = action_type\n self.add_mention_end = add_mention_end\n self.cluster_ids = None\n self.copy_id = special_ids['copy']\n self.seq2seq_type = seq2seq_type\n if action_type == 'integer':\n self.sep = special_ids['sep']\n self.ent_ids = special_ids['integers'] + 
[\n special_ids['mention_end']]\n self.specials = [self.mention_start, self.sep,\n self.copy_id] + self.ent_ids\n # self.seq2seq_type = seq2seq_type\n else:\n self.cluster_new = special_ids['cluster_new']\n self.cluster_ids = special_ids['cluster_ids']\n self.eos_id = special_ids['eos']\n if self.add_mention_end:\n self.specials = [self.mention_start,\n self.mention_end,\n self.cluster_new,\n self.copy_id] + self.cluster_ids\n else:\n self.specials = [self.mention_start,\n self.cluster_new,\n self.copy_id] + self.cluster_ids\n if self.seq2seq_type == 'tagging':\n self.specials.append(self.eos_id)\n self.is_input_feed = (self.seq2seq_type == \"input_feed\")\n\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput,\n config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.BoolTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n decoder_input_actions: Optional[torch.LongTensor] = None,\n full_decoder_input_ids: Optional[torch.LongTensor] = None\n ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,\n config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the loss is only computed for\n labels in `[0, ..., config.vocab_size]`\n\n Returns:\n\n \"\"\"\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask\n if head_mask is not None and decoder_head_mask is None:\n if self.config.num_layers == self.config.num_decoder_layers:\n warnings.warn(HEAD_MASK_WARNING_MSG, FutureWarning)\n decoder_head_mask = head_mask\n\n # Encode if needed (training, first prediction pass)\n if encoder_outputs is None:\n # Convert encoder inputs in embeddings if needed\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(\n encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(\n encoder_outputs) > 2 else None,\n )\n\n hidden_states = encoder_outputs[0]\n\n if self.model_parallel:\n torch.cuda.set_device(self.decoder.first_device)\n\n if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:\n # get decoder inputs from shifting lm labels to the right\n decoder_input_ids = self._shift_right(labels)\n # Set device for model parallelism\n if self.is_input_feed and not self.training and decoder_input_actions is None:\n decoder_input_actions = self.input_to_actions(\n full_decoder_input_ids)\n if self.model_parallel:\n torch.cuda.set_device(self.decoder.first_device)\n hidden_states = hidden_states.to(self.decoder.first_device)\n if decoder_input_ids is not None:\n decoder_input_ids = decoder_input_ids.to(\n self.decoder.first_device)\n if attention_mask is not None:\n attention_mask = attention_mask.to(self.decoder.first_device)\n if decoder_attention_mask is not None:\n decoder_attention_mask = decoder_attention_mask.to(\n self.decoder.first_device)\n if self.is_input_feed and decoder_input_actions is \\\n not None:\n decoder_input_actions = decoder_input_actions.to(\n self.decoder.first_device\n )\n if self.is_input_feed:\n decoder_token_embeds = self.decoder.embed_tokens(decoder_input_ids)\n if not self.training and past_key_values is not None:\n decoder_action_embeds = self.decoder.embed_tokens(\n decoder_input_actions[:, -1:])\n else:\n decoder_action_embeds = self.decoder.embed_tokens(\n decoder_input_actions)\n decoder_inputs_embeds = decoder_token_embeds / 2 + decoder_action_embeds / 2\n # Decode\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids if not self.is_input_feed else None,\n attention_mask=decoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n past_key_values=past_key_values,\n encoder_hidden_states=hidden_states,\n encoder_attention_mask=attention_mask,\n head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = decoder_outputs[0]\n\n # Set device for model parallelism\n if self.model_parallel:\n torch.cuda.set_device(self.encoder.first_device)\n self.lm_head = 
self.lm_head.to(self.encoder.first_device)\n sequence_output = sequence_output.to(self.lm_head.weight.device)\n\n if self.config.tie_word_embeddings:\n # Rescale output before projecting on vocab\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586\n sequence_output = sequence_output * (self.model_dim ** -0.5)\n\n lm_logits = self.lm_head(sequence_output)\n masks = torch.ones_like(lm_logits,\n dtype=torch.bool)\n masks[:, :, self.specials] = False\n lm_logits.masked_fill_(masks, -float('inf'))\n\n loss = None\n if labels is not None:\n # construct constrained mask here\n\n loss_fct = CrossEntropyLoss(ignore_index=-100)\n loss = loss_fct(lm_logits.view(-1, lm_logits.size(\n -1)), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs\n return ((loss,) + output) if loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=loss,\n logits=lm_logits,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self,\n input_ids,\n past=None,\n attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n use_cache=None,\n encoder_outputs=None,\n **kwargs\n ):\n\n # cut decoder_input_ids if past is used\n if past is not None:\n cut_input_ids = input_ids[:, -1:]\n else:\n cut_input_ids = input_ids\n\n return {\n \"decoder_input_ids\": cut_input_ids,\n \"past_key_values\": past,\n \"encoder_outputs\": encoder_outputs,\n \"attention_mask\": attention_mask,\n \"head_mask\": head_mask,\n \"decoder_head_mask\": decoder_head_mask,\n \"cross_attn_head_mask\": cross_attn_head_mask,\n \"use_cache\": use_cache,\n \"full_decoder_input_ids\": input_ids\n }\n\n def input_to_actions(self, input_ids: torch.LongTensor):\n # input_ids : B x L\n input_actions = deepcopy(input_ids)\n if self.action_type == 'integer':\n is_sep = (input_ids == self.sep)\n is_end = (input_ids == self.mention_end)\n is_start = (input_ids == self.mention_start)\n is_ent = (is_sep.cumsum(-1) - is_end.cumsum(-1)).bool()\n is_copy = ((~is_start) & (~is_ent) & (~is_end))\n else:\n cluster_ids = self.cluster_ids.to(input_ids.device)\n is_not_cid = torch.isin(input_ids, cluster_ids, invert=True)\n is_not_start = (input_ids != self.mention_start)\n if self.add_mention_end:\n is_not_end = (input_ids != self.mention_end)\n is_copy = (is_not_start & is_not_end & is_not_cid)\n else:\n is_copy = (is_not_start & is_not_cid)\n input_actions[:, 1:][is_copy[:, 1:]] = self.copy_id\n return input_actions" } ]
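The forward pass in the snippet above constrains generation by building a boolean mask over the vocabulary and filling every position outside self.specials with -inf before the loss is computed. A minimal, self-contained sketch of that masking idea follows; the tensor sizes and allowed ids are placeholders for illustration, not the repository's values.

# Hedged illustration (not part of the record): restricting logits to an allowed
# id set, mirroring the masked_fill_ call in the forward pass above.
import torch

def restrict_logits(lm_logits: torch.Tensor, allowed_ids: list) -> torch.Tensor:
    """Set every vocabulary position outside `allowed_ids` to -inf."""
    mask = torch.ones_like(lm_logits, dtype=torch.bool)
    mask[:, :, allowed_ids] = False   # keep only the allowed special/action ids
    return lm_logits.masked_fill(mask, -float("inf"))

toy_logits = torch.randn(2, 4, 12)            # (batch, seq_len, vocab)
constrained = restrict_logits(toy_logits, allowed_ids=[0, 3, 7])
assert torch.isinf(constrained[0, 0, 1])      # a disallowed id is masked out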
import logging import os import sys from transformers import HfArgumentParser, set_seed from transformers import AutoModelForSeq2SeqLM, \ DataCollatorForSeq2Seq, AutoConfig, AutoTokenizer from transformers.integrations import TensorBoardCallback from arguments import DataArguments, ModelArguments, CorefTrainingArguments \ as TrainingArguments from data import CorefDataset, JointDataset from constants import SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, \ COPY, CLUSTER_NEW, CLUSTERS, SENTENCE_START, SENTENCE_END, SPECIAL_IDS, \ NON_INT_SPECIAL_IDS, MARK_SPECIAL_IDS, MENTION_END_NON_INT_SPECIAL_IDS, \ MENTION_ENDS from trainer import CorefTrainer from data import ConstrainedDataCollator from model import ConstrainedT5
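The import block above pulls in HfArgumentParser, which the cropped script below uses to parse either a JSON config file or command-line flags into argument dataclasses. A small hedged sketch of that pattern; the dataclasses here are made up for illustration and are not the project's ModelArguments, DataArguments, or TrainingArguments.

# Hedged sketch with placeholder dataclasses; only the HfArgumentParser usage
# pattern mirrors the training script in this record.
import os
import sys
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class ToyModelArguments:
    model_name_or_path: str = field(default="t5-small")

@dataclass
class ToyTrainingArguments:
    output_dir: str = field(default="./outputs")
    seed: int = field(default=42)

parser = HfArgumentParser((ToyModelArguments, ToyTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
    model_args, training_args = parser.parse_json_file(os.path.abspath(sys.argv[1]))
else:
    model_args, training_args = parser.parse_args_into_dataclasses()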
20,557
logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler(sys.stdout)) def main(): parser = HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args: ModelArguments data_args: DataArguments training_args: TrainingArguments if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, fp16 training: %s, bf16 training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, training_args.bf16, ) logger.info("Training/evaluation parameters %s", training_args) logger.info("MODEL parameters %s", model_args) logger.info("Data arguments %s", data_args) set_seed(training_args.seed) tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) if training_args.action_type == "integer": num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY]) elif training_args.action_type == "non_integer": if training_args.add_mention_end: num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY, CLUSTER_NEW] + CLUSTERS) else: num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, COPY, CLUSTER_NEW] + MENTION_ENDS) else: raise ValueError(f"wrong action type {training_args.action_type}") if training_args.seq2seq_type == 'short_seq' and \ training_args.mark_sentence: num_new_tokens += tokenizer.add_tokens([SENTENCE_START, SENTENCE_END]) # we need to resize model token embeddings config = AutoConfig.from_pretrained(model_args.model_name_or_path) if training_args.gradient_checkpointing: # use_cache is False for training, True for evaluation config.use_cache = False if training_args.seq2seq_type == 'action' or training_args.seq2seq_type \ == 'tagging' or training_args.seq2seq_type == 'input_feed': if training_args.action_type == "integer": special_ids = SPECIAL_IDS elif training_args.action_type == "non_integer": if training_args.add_mention_end:
logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler(sys.stdout)) def main(): parser = HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args: ModelArguments data_args: DataArguments training_args: TrainingArguments if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, fp16 training: %s, bf16 training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, training_args.bf16, ) logger.info("Training/evaluation parameters %s", training_args) logger.info("MODEL parameters %s", model_args) logger.info("Data arguments %s", data_args) set_seed(training_args.seed) tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) if training_args.action_type == "integer": num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY]) elif training_args.action_type == "non_integer": if training_args.add_mention_end: num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY, CLUSTER_NEW] + CLUSTERS) else: num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, COPY, CLUSTER_NEW] + MENTION_ENDS) else: raise ValueError(f"wrong action type {training_args.action_type}") if training_args.seq2seq_type == 'short_seq' and \ training_args.mark_sentence: num_new_tokens += tokenizer.add_tokens([SENTENCE_START, SENTENCE_END]) # we need to resize model token embeddings config = AutoConfig.from_pretrained(model_args.model_name_or_path) if training_args.gradient_checkpointing: # use_cache is False for training, True for evaluation config.use_cache = False if training_args.seq2seq_type == 'action' or training_args.seq2seq_type \ == 'tagging' or training_args.seq2seq_type == 'input_feed': if training_args.action_type == "integer": special_ids = SPECIAL_IDS elif training_args.action_type == "non_integer": if training_args.add_mention_end:
special_ids = MENTION_END_NON_INT_SPECIAL_IDS
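The crop above adds task-specific special tokens and notes, in a comment, that the model's token embeddings must be resized afterwards; the resize call itself lies outside the crop. A minimal hedged sketch of that step, with a placeholder model name and token strings rather than the record's exact constants:

# Hedged sketch: resize the embedding matrix after enlarging the tokenizer vocabulary.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")
num_new_tokens = tokenizer.add_tokens(["<speaker>", "</speaker>", "<m>", "</m>", "<copy>"])
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
if num_new_tokens > 0:
    model.resize_token_embeddings(len(tokenizer))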
17
2023-10-17 17:39:16+00:00
24k
giulio98/functional-diffusion-processes
src/functional_diffusion_processes/trainers/trainer.py
[ { "identifier": "AudioDataset", "path": "src/functional_diffusion_processes/datasets/audio_dataset.py", "snippet": "class AudioDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for defining audio datasets.\n\n This class serves as the foundation for defining datasets containing audio data.\n It includes methods for preprocessing, resizing, and normalizing audio data.\n Subclasses may override these methods to implement dataset-specific processing and resizing logic.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Initialize an AudioDataset instance.\n\n Args:\n data_config (DictConfig): Configuration for loading the dataset, including paths, audio properties, etc.\n split (str): Specifies which split of the dataset to load (e.g., 'train', 'validation', 'test').\n evaluation (bool, optional): Indicates whether the dataset is for evaluation purposes. Defaults to False.\n \"\"\"\n super().__init__(data_config, split, evaluation)\n\n @staticmethod\n def normalize_audio(audio_np: np.ndarray, sample_rate: int) -> np.ndarray:\n \"\"\"Normalize the amplitude of the audio data to a standard range.\n\n This method utilizes PyDub's effects module to perform audio normalization.\n\n Args:\n audio_np (np.ndarray): Audio data represented as a NumPy array.\n sample_rate (int): The sample rate of the audio data.\n\n Returns:\n np.ndarray: The normalized audio data as a NumPy array.\n \"\"\"\n # Convert numpy array to AudioSegment\n audio_segment = AudioSegment(audio_np.tobytes(), frame_rate=int(sample_rate), sample_width=2, channels=1)\n\n # Normalize with PyDub\n normalized_audio_segment = effects.normalize(audio_segment)\n\n # Convert back to numpy\n normalized_audio_np = np.array(normalized_audio_segment.get_array_of_samples())\n\n return normalized_audio_np\n\n def _resize_op(self, audio: tf.Tensor, size: int) -> tf.Tensor:\n \"\"\"Resize the input audio to a specified size and normalize its amplitude to the range [0, 1].\n\n If the audio length is less than the specified size, zero padding is applied to reach the desired size.\n If the audio length is greater, it is truncated to the specified size.\n\n Args:\n audio (tf.Tensor): Input audio data as a TensorFlow tensor.\n size (int): The target size for the audio data.\n\n Returns:\n tf.Tensor: The resized and normalized audio data as a TensorFlow tensor.\n \"\"\"\n # Normalize dataset\n pylogger.info(\"Normalizing audio...\")\n audio = tf.cast(audio, dtype=tf.int16)\n # Calculate current length of the audio\n pylogger.info(\"Resizing audio to size {}...\".format(size))\n audio_length = tf.shape(audio)[0]\n audio = tf.cond(\n audio_length < size,\n lambda: tf.concat([audio, tf.zeros(size - audio_length, dtype=audio.dtype)], axis=0),\n lambda: audio[:size],\n )\n audio_np = tf.numpy_function(self.normalize_audio, [audio, self.data_config.audio_sample_rate], tf.int16)\n audio = tf.convert_to_tensor(audio_np, dtype=tf.int16)\n audio = tf.cast(audio, dtype=tf.float32)\n pylogger.info(\"Converting audio to range [-1, 1]...\")\n max_intensity = self.data_config.audio_max_intensity\n audio = audio / max_intensity\n return audio\n\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Preprocess the input audio data.\n\n This method resizes the audio data to a specified size based on the dataset configuration and normalizes the amplitude to the range [-1, +1].\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input audio data and any associated metadata.\n\n 
Returns:\n Dict[str, Any]: A dictionary containing the preprocessed audio data and any associated metadata.\n \"\"\"\n pylogger.info(\"Preprocessing audios for split {}...\".format(self.split))\n audio = self._resize_op(\n audio=d[\"audio\"], size=int(self.data_config.audio_sample_rate * self.data_config.audio_max_duration)\n )\n audio = tf.reshape(\n tensor=audio,\n shape=(-1, self.data_config.output_size),\n )\n pylogger.info(\"Audio reshaped to shape {}...\".format(audio.shape))\n return dict(data=audio, label=d.get(\"label\", None))\n\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Postprocess the output audio data.\n\n This method applies the inverse of the preprocessing steps to revert the audio data to its original form.\n\n Args:\n batch_data (Any): A batch of audio data to postprocess.\n inverse_scaler (Callable): A function that applies the inverse of the preprocessing steps.\n\n Returns:\n Any: A batch of postprocessed audio data.\n \"\"\"\n max_intensity = self.data_config.audio_max_intensity\n batch_audio = inverse_scaler(batch_data)\n batch_audio = batch_audio * max_intensity\n batch_post_processed = tf.cast(batch_audio, tf.int16)\n audio_np = tf.numpy_function(\n self.normalize_audio, [batch_post_processed, self.data_config.audio_sample_rate], tf.int16\n )\n batch_post_processed = tf.convert_to_tensor(audio_np, dtype=tf.int16)\n return batch_post_processed" }, { "identifier": "ImageDataset", "path": "src/functional_diffusion_processes/datasets/image_dataset.py", "snippet": "class ImageDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for handling image datasets.\n\n Provides a structured way to load, preprocess, and post-process image data.\n This class can be extended to handle specific image datasets as required.\n\n Attributes:\n data_config (DictConfig): Configuration settings for loading the dataset.\n split (str): Specifies the dataset split to load ('train', 'val', 'test', etc.).\n evaluation (bool): Indicates if the dataset is used for evaluation.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Initializes the ImageDataset object with dataset configurations.\n\n Args:\n data_config (DictConfig): Configuration settings for loading the dataset.\n split (str): Specifies the dataset split to load ('train', 'val', 'test', etc.).\n evaluation (bool): Indicates if the dataset is used for evaluation.\n \"\"\"\n super().__init__(data_config, split, evaluation)\n\n @staticmethod\n def _resize_op(image: Any, size: int) -> Any:\n \"\"\"Resizes the input image to the specified size and normalizes its values to the range [0,1].\n\n Args:\n image (Any): A tensor representing the input image.\n size (int): The target size for each dimension of the output image.\n\n Returns:\n Any: A tensor representing the resized and normalized image.\n \"\"\"\n # convert to range [0,1]\n pylogger.info(\"Converting image to range [0,1]...\")\n image = tf.image.convert_image_dtype(image=image, dtype=tf.float32)\n\n # resize to size\n pylogger.info(\"Resizing image to size {}...\".format(size))\n\n image = tf.image.resize(images=image, size=[size, size])\n\n return image\n\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Preprocesses the input data by resizing, possibly flipping, and applying uniform dequantization.\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input data with keys 'image' and optionally 'label'.\n\n Returns:\n Dict[str, Any]: A dictionary 
containing the preprocessed data, with keys 'data' and optionally 'label'.\n \"\"\"\n image = self._resize_op(image=d[\"image\"], size=self.data_config.image_width_size)\n\n pylogger.info(\"Preprocessing images for split {}...\".format(self.split))\n\n if self.data_config.random_flip and not self.evaluation:\n pylogger.info(\"Applying random flips...\")\n image = tf.image.random_flip_left_right(image=image, seed=self.data_config.seed)\n\n if self.data_config.uniform_dequantization:\n pylogger.info(\"Applying uniform dequantization...\")\n image = (\n tf.random.uniform(shape=image.shape, dtype=tf.float32, seed=self.data_config.seed) + image * 255.0\n ) / 256.0\n\n image = tf.reshape(\n tensor=image,\n shape=(-1, self.data_config.output_size),\n )\n pylogger.info(\"Image reshaped to shape {}...\".format(image.shape))\n\n return dict(data=image, label=d.get(\"label\", None))\n\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Post-processes the output data by reverting the preprocessing steps.\n\n Args:\n batch_data (Any): A batch of data to postprocess.\n inverse_scaler (Callable): A function to invert the scaling applied to the data.\n\n Returns:\n Any: A batch of postprocessed data, arranged in a grid for visualization.\n \"\"\"\n batch_post_processed = make_grid_image(\n ndarray=process_images(images=batch_data),\n inverse_scaler=inverse_scaler,\n )\n return batch_post_processed" }, { "identifier": "BaseDataset", "path": "src/functional_diffusion_processes/datasets/base_dataset.py", "snippet": "class BaseDataset(abc.ABC):\n \"\"\"Abstract base class for defining datasets.\n\n Provides a template for loading, preprocessing, and iterating over datasets.\n It encapsulates common dataset configurations and operations while allowing for dataset-specific\n preprocessing and post-processing through abstract methods.\n\n Attributes:\n dataset_builder: A builder object for loading the dataset.\n data_config (DictConfig): Configuration parameters for the dataset.\n split (str): Specifies which split of the dataset to load, e.g., 'train', 'validation', or 'test'.\n evaluation (bool): Indicates whether the dataset is for evaluation purposes.\n dataset_options: Options for configuring the dataset pipeline.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Abstract base class for defining datasets.\n\n This class provides a skeleton for defining datasets, with abstract methods for\n preprocessing data, generating batches of data, and resizing images. 
Subclasses\n must implement these methods to define their specific datasets.\n\n Args:\n data_config (DictConfig): A dictionary-like object containing the configuration for\n loading the dataset.\n\n split (str): A string specifying which split of the dataset to load.\n\n evaluation (bool): A boolean specifying whether the dataset is for evaluation purposes.\n \"\"\"\n self.dataset_builder = None\n self.data_config = data_config\n self.split = split\n self.evaluation = evaluation\n self.dataset_options = tf.data.Options()\n self.dataset_options.experimental_optimization.map_parallelization = True\n self.dataset_options.experimental_threading.private_threadpool_size = 48\n self.dataset_options.experimental_threading.max_intra_op_parallelism = 1\n\n @abc.abstractmethod\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Abstract method for preprocessing input data.\n\n Subclasses should override this method to implement dataset-specific preprocessing.\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input data.\n\n Returns:\n Dict[str, Any]: A dictionary containing the preprocessed data.\n \"\"\"\n raise NotImplementedError(\"Subclasses must implement preprocess_fn method.\")\n\n @abc.abstractmethod\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Abstract method for postprocessing output data.\n\n Subclasses should override this method to implement dataset-specific post-processing.\n\n Args:\n batch_data (Any): A batch of data to postprocess.\n inverse_scaler (Callable): A function to inverse the scaling of the data.\n\n Returns:\n Any: A dictionary containing the postprocessed data.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the postprocess_fn method.\")\n\n def _generator(self) -> Iterator[Any]:\n \"\"\"Generate batches of preprocessed data.\n\n Loads the dataset, shuffles the data, applies preprocessing, and batches the data.\n Subclasses might override this method to implement dataset-specific batching logic.\n\n Returns:\n Iterator[Any]: An iterator that generates batches of preprocessed data.\n \"\"\"\n # load the dataset\n if isinstance(self.dataset_builder, tfds.core.DatasetBuilder):\n read_config = tfds.ReadConfig(options=self.dataset_options)\n if self.data_config.download:\n self.dataset_builder.download_and_prepare()\n ds = self.dataset_builder.as_dataset(\n split=self.split,\n shuffle_files=False,\n read_config=read_config,\n as_supervised=False,\n )\n else:\n ds = self.dataset_builder.with_options(options=self.dataset_options)\n\n ds = ds.shuffle(buffer_size=10000, seed=self.data_config.seed)\n\n # apply the preprocessing function to each element in the dataset\n ds = ds.map(map_func=self.preprocess_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # determine the batch size per device\n ds = ds.batch(batch_size=self.data_config.batch_size, drop_remainder=True)\n ds = ds.batch(batch_size=jax.device_count(), drop_remainder=True)\n\n ds = ds.repeat(count=100000 if not self.evaluation else 1)\n\n return iter(ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE))\n\n def __iter__(self) -> Iterator[Any]:\n \"\"\"Return an iterator that generates batches of preprocessed data.\n\n Calls the `_generator` method to obtain an iterator for generating preprocessed data batches.\n\n Returns:\n Iterator[Any]: An iterator that generates batches of preprocessed data.\n \"\"\"\n return self._generator()\n\n def __len__(self) -> int:\n \"\"\"Return the number of examples in the 
dataset.\n\n Obtains the total number of examples in the specified dataset split from the dataset builder's info attribute.\n\n Returns:\n int: The number of examples in the dataset.\n \"\"\"\n return self.dataset_builder.info.splits[self.split].num_examples" }, { "identifier": "Loss", "path": "src/functional_diffusion_processes/losses/base_loss.py", "snippet": "class Loss(abc.ABC):\n \"\"\"Abstract class representing a loss function.\n\n Provides a framework for defining custom loss functions by enforcing the implementation\n of `construct_loss_fn` method in any derived classes. This class holds a reference to\n a stochastic differential equation (SDE) object which is used to calculate the weight factor for the loss.\n\n Attributes:\n sde (SDE): The stochastic differential equation instance associated with this loss.\n \"\"\"\n\n def __init__(self, sde: SDE) -> None:\n \"\"\"Initializes the Loss instance with a given SDE.\n\n Args:\n sde (SDE): An SDE instance which might be used in the loss computation.\n \"\"\"\n self.sde = sde\n\n def construct_loss_fn(self, model: Any) -> Callable:\n \"\"\"Abstract method to construct a loss function for a given model.\n\n This method should be implemented by any derived class to define the loss\n computation specific to the type of loss being implemented.\n\n Args:\n model (Any): The model for which to construct the loss function.\n\n Returns:\n Callable: A callable representing the constructed loss function.\n\n Raises:\n NotImplementedError: If the method is not implemented by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the construct_loss_fn method.\")" }, { "identifier": "FIDMetric", "path": "src/functional_diffusion_processes/metrics/fid_metric.py", "snippet": "class FIDMetric:\n \"\"\"Class for computing the Frechet Inception Distance (FID) metric.\n\n This class facilitates the computation of the FID metric, which measures the similarity between two distributions of images.\n It precomputes features for the real dataset using a specified Inception feature extractor and provides methods to compute\n and store features for generated images, and to compute the FID and Inception Score (IS).\n\n Attributes:\n metric_config (DictConfig): Configuration parameters for the FID metric.\n feature_extractor (InceptionFeatureExtractor): Inception feature extractor for computing the FID metric.\n dataset (BaseDataset): Dataset object providing real samples for FID computation.\n generated_pools (list): List to store features of generated images.\n generated_logits (list): List to store logits of generated images.\n real_features (dict): Dictionary to store precomputed features of real dataset.\n \"\"\"\n\n def __init__(\n self,\n metric_config: DictConfig,\n feature_extractor: InceptionFeatureExtractor,\n dataset: BaseDataset,\n ) -> None:\n \"\"\"Initializes the FIDMetric class with specified configurations, feature extractor, and dataset.\n\n Args:\n metric_config (DictConfig): Configuration parameters for the FID metric.\n feature_extractor (InceptionFeatureExtractor): Inception feature extractor for computing the FID metric.\n dataset (BaseDataset): Dataset object providing real samples for FID computation.\n \"\"\"\n self.metric_config = metric_config\n self.feature_extractor = feature_extractor\n self.dataset = dataset\n self.generated_pools = []\n self.generated_logits = []\n try:\n self.real_features = load_dataset_stats(\n save_path=metric_config.real_features_path,\n 
dataset_name=metric_config.dataset_name,\n )\n except FileNotFoundError:\n self._precompute_features(\n dataset_name=metric_config.dataset_name,\n save_path=metric_config.real_features_path,\n )\n self.real_features = load_dataset_stats(\n save_path=metric_config.real_features_path,\n dataset_name=metric_config.dataset_name,\n )\n\n def _precompute_features(self, dataset_name: str, save_path: str) -> None:\n \"\"\"Precomputes and saves features for the real dataset.\n\n Args:\n dataset_name (str): Name of the dataset.\n save_path (str): Path where the computed features will be saved.\n \"\"\"\n tf.io.gfile.makedirs(path=save_path)\n\n tf.io.gfile.makedirs(os.path.join(save_path, f\"{dataset_name.lower()}_clean\"))\n\n # Use the feature extractor to compute features for the real dataset\n all_pools = self.feature_extractor.extract_features(\n dataset=self.dataset, save_path=save_path, dataset_name=dataset_name\n )\n\n # Save latent represents of the Inception network to disk or Google Cloud Storage\n filename = f\"{dataset_name.lower()}_stats.npz\"\n\n if jax.host_id() == 0:\n pylogger.info(\"Saving real dataset stats to: %s\" % os.path.join(save_path, filename))\n\n with tf.io.gfile.GFile(os.path.join(save_path, filename), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, pool_3=all_pools)\n f_out.write(io_buffer.getvalue())\n\n def compute_fid(self, eval_dir, num_sampling_round) -> Tuple[float, float]:\n \"\"\"Computes the FID and Inception Score (IS) for the generated and real images.\n\n Args:\n eval_dir (str): Directory path for evaluation.\n num_sampling_round (int): Number of sampling rounds.\n\n Returns:\n Tuple[float, float]: A tuple containing the FID and Inception Score.\n \"\"\"\n real_pools = self.real_features[\"pool_3\"]\n if not self.feature_extractor.inception_v3 and not self.feature_extractor.inception_v3 == \"lenet\":\n if len(self.generated_logits) == 0 or len(self.generated_pools) == 0:\n if jax.host_id() == 0:\n # Load all statistics that have been previously computed and saved for each host\n for host in range(jax.host_count()):\n stats = tf.io.gfile.glob(os.path.join(eval_dir, \"statistics_*.npz\"))\n wait_message = False\n while len(stats) < num_sampling_round:\n if not wait_message:\n print(\"Waiting for statistics on host %d\" % (host,))\n wait_message = True\n stats = tf.io.gfile.glob(os.path.join(eval_dir, \"statistics_*.npz\"))\n time.sleep(10)\n\n for stat_file in stats:\n with tf.io.gfile.GFile(stat_file, \"rb\") as fin:\n stat = np.load(fin)\n\n self.generated_pools.append(stat[\"pool_3\"])\n self.generated_logits.append(stat[\"logits\"])\n\n all_logits = np.concatenate(self.generated_logits, axis=0)[: self.metric_config.num_samples]\n inception_score = tfgan.eval.classifier_score_from_logits(logits=all_logits)\n else:\n inception_score = -1\n\n all_pools = np.concatenate(self.generated_pools, axis=0)[: self.metric_config.num_samples]\n\n fid = tfgan.eval.frechet_classifier_distance_from_activations(activations1=real_pools, activations2=all_pools)\n\n return fid, inception_score\n\n def compute_and_store_generated_features(self, images: Any, sample_dir: str, round_num: int) -> None:\n \"\"\"Computes features for the generated images and stores them in a specified directory.\n\n Args:\n images (Any): Tensor representing the generated images.\n sample_dir (str): Directory where the features will be stored.\n round_num (int): Round number in the training process.\n \"\"\"\n latents = 
self.feature_extractor.extract_features(images)\n\n self.generated_pools.append(latents[\"pool_3\"])\n\n gc.collect()\n\n if self.feature_extractor.model_name == \"inception\" or self.feature_extractor.inception_v3:\n self.generated_logits.append(latents[\"logits\"])\n with tf.io.gfile.GFile(os.path.join(sample_dir, f\"statistics_{round_num}.npz\"), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(\n io_buffer,\n pool_3=latents[\"pool_3\"],\n logits=latents[\"logits\"],\n )\n\n f_out.write(io_buffer.getvalue())\n\n elif self.feature_extractor.model_name == \"lenet\":\n with tf.io.gfile.GFile(os.path.join(sample_dir, f\"statistics_{round_num}.npz\"), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, pool_3=latents[\"pool_3\"])\n f_out.write(io_buffer.getvalue())" }, { "identifier": "Sampler", "path": "src/functional_diffusion_processes/samplers/base_sampler.py", "snippet": "class Sampler(abc.ABC):\n \"\"\"Abstract base class for creating sampler objects.\n\n This class serves as a template for creating sampler objects which are\n designed to generate samples of a stochastic process governed by a\n specified stochastic differential equation (SDE). The process of sampling\n is carried out by employing specified predictor and corrector methods.\n\n Attributes:\n predictor (Predictor): The predictor method to be used in the sampling process.\n corrector (Corrector): The corrector method to be used in the sampling process.\n sde (SDE): The stochastic differential equation governing the process to be sampled.\n sampler_config (DictConfig): Configuration settings for the sampler.\n\n Methods:\n make_sampler(predict_fn: Callable) -> Callable:\n Abstract method to create a sampling function based on the specified predictor,\n corrector, and SDE.\n \"\"\"\n\n def __init__(self, predictor: Predictor, corrector: Corrector, sde: SDE, sampler_config: DictConfig) -> None:\n \"\"\"Initializes the Sampler object with specified predictor, corrector, SDE, and configuration.\n\n Args:\n predictor (Predictor): The predictor method for the sampler.\n corrector (Corrector): The corrector method for the sampler.\n sde (SDE): The stochastic differential equation governing the process.\n sampler_config (DictConfig): Configuration settings for the sampler.\n \"\"\"\n super().__init__()\n self.predictor = predictor\n self.corrector = corrector\n self.sampler_config = sampler_config\n self.sde = sde\n\n def make_sampler(self, predict_fn: Callable, auxiliary_fn: Union[Any, Callable]) -> Callable:\n \"\"\"Abstract method to create a sampler function.\n\n This method is intended to be overridden by derived classes to provide\n specific implementations for creating a sampler function. 
The sampler\n function will utilize the specified predictor and corrector methods\n along with the provided SDE to generate samples of the stochastic process.\n\n Args:\n predict_fn (Callable): The model prediction function.\n auxiliary_fn (Callable): The auxiliary prediction function for the model.\n\n Returns:\n Callable: The constructed sampling function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the make_sampler method.\")" }, { "identifier": "SDE", "path": "src/functional_diffusion_processes/sdetools/base_sde.py", "snippet": "class SDE(abc.ABC):\n \"\"\"Abstract base class for representing Stochastic Differential Equations (SDEs).\n\n This class provides a structured way to define and work with SDEs, including computing\n Fourier transforms, discretizing the equations, and defining the drift and diffusion terms.\n\n Attributes:\n sde_config (DictConfig): Configuration object containing SDE settings.\n T (float): Total time duration.\n N (int): Number of time steps.\n eps (float): Small constant for numerical stability.\n is_unidimensional (bool): Flag indicating if the SDE is unidimensional.\n \"\"\"\n\n def __init__(self, sde_config: DictConfig) -> None:\n \"\"\"Initializes the SDE with the given configuration.\n\n Args:\n sde_config (DictConfig): Configuration object containing SDE settings.\n \"\"\"\n super().__init__()\n self.sde_config = sde_config\n self.T = self.sde_config.T\n self.N = self.sde_config.N\n self.eps = self.sde_config.eps\n self.is_unidimensional = True if len(self.sde_config.shape) == 1 else False\n\n def fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.fft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.fft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n def inverse_fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the inverse Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose inverse Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Inverse Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.ifft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.ifft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n @abc.abstractmethod\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Abstract method to compute the drift and diffusion terms of the SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the drift and diffusion terms of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the sde method.\")\n\n @abc.abstractmethod\n def marginal_prob(\n self,\n rng: PRNGKeyArray,\n x: jnp.ndarray,\n t: jnp.ndarray,\n t0: Optional[jnp.ndarray] = None,\n ) -> Tuple[Any, jnp.ndarray | Any]:\n \"\"\"Computes the marginal probability density at a given time.\n\n This is an abstract method that should be overridden by subclasses to\n compute the marginal probability density based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): State of the system.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[Any, jnp.ndarray | Any]: Marginal probability density at the given time.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the marginal_prob method.\")\n\n @abc.abstractmethod\n def diffuse(\n self, rng: PRNGKeyArray, x: jnp.ndarray, t: jnp.ndarray, t0: Optional[jnp.ndarray] = None\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Performs diffusion of the input from time t0 to time t.\n\n This is an abstract method that should be overridden by subclasses to\n implement the diffusion process based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): Input state.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Mean of the corrupted input and the corrupted input.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the diffuse method.\")\n\n @abc.abstractmethod\n def prior_sampling(\n self, rng: PRNGKeyArray, shape: Tuple[int, ...], t0: Optional[jnp.ndarray] = None\n ) -> jnp.ndarray:\n \"\"\"Generates a sample from the prior distribution of the SDE.\n\n This is an abstract method that should be overridden by subclasses to\n implement the prior sampling process based on the shape and initial time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the sample to be generated.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n jnp.ndarray: A sample from the prior distribution of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the prior_sampling method.\")\n\n @abc.abstractmethod\n def score_fn(\n self, y_corrupted: jnp.ndarray, y_reconstructed: jnp.ndarray, t: jnp.ndarray, rng: Optional[PRNGKeyArray] = None\n ) -> jnp.ndarray:\n \"\"\"Computes the score function based on the corrupted and reconstructed states.\n\n This is an abstract method that should be overridden by subclasses to\n compute the score function based on the state and time.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n y_reconstructed (jnp.ndarray): Reconstructed state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n\n Returns:\n jnp.ndarray: The score function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the score_fn method.\")\n\n @abc.abstractmethod\n def get_psm(self, t: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Power-Special-Matrix(PSM) used as a weighting factor for the loss.\n\n This is an abstract method that should be overridden by subclasses to\n compute the state-dependent diffusion matrix based on the time.\n\n Args:\n t (jnp.ndarray): Current time.\n\n Returns:\n jnp.ndarray: The state-dependent diffusion matrix.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_psm method.\")\n\n @abc.abstractmethod\n def get_reverse_noise(self, rng: PRNGKeyArray, shape: Tuple[int, ...]) -> jnp.ndarray:\n \"\"\"Generates noise for the reverse SDE.\n\n This is an abstract method that should be overridden by subclasses to\n generate reverse noise based on the shape.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the noise to be generated.\n\n Returns:\n jnp.ndarray: The reverse noise.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_reverse_noise method.\")\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the SDE into an iterative update rule.\n\n This method computes the discrete drift and diffusion terms based on the continuous SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the discrete drift and diffusion terms.\n \"\"\"\n dt = (self.T - self.eps) / self.N\n drift, diffusion = self.sde(y_corrupted, t, y_reconstructed)\n f = drift * dt\n g = diffusion * jnp.sqrt(dt)\n return f, g\n\n def reverse(self):\n \"\"\"Creates a reverse-time version of the current SDE.\n\n This method defines a nested class for the reverse-time SDE and returns an instance of it.\n\n Returns:\n ReverseSDE: An instance of the reverse-time SDE subclass.\n \"\"\"\n num_time_steps = self.N\n end_t = self.T\n sde_fn = self.sde\n discretize_fn = self.discretize\n score_fn = self.score_fn\n sde_config = self.sde_config\n\n class ReverseSDE(self.__class__, abc.ABC):\n \"\"\"Reverse Stochastic Differential Equation abstract base class.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the ReverseSDE class.\n\n Inherits the properties from the original SDE class and overrides the relevant methods for the\n reverse-time SDE.\n \"\"\"\n super().__init__(sde_config)\n self.N = num_time_steps\n self.T = end_t\n self.score_fn = score_fn\n\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Return the drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the reverse-time SDE.\n \"\"\"\n drift, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = -drift + batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n # Set the diffusion function to zero for ODEs.\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the reverse-time SDE in the form of an iterative update rule.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the discretized reverse-time SDE.\n \"\"\"\n f, g = discretize_fn(y_corrupted, t, y_corrupted)\n rev_f = -f + batch_mul(\n g**2,\n self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n * (0.5 if self.sde_config.probability_flow else 1.0),\n )\n rev_g = jnp.zeros_like(g) if self.sde_config.probability_flow else g\n return rev_f, rev_g\n\n def semi_analytic(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Computes the semi-analytic drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the semi-analytic reverse-time SDE.\n \"\"\"\n _, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n return ReverseSDE()" }, { "identifier": "filter_mask", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def filter_mask(shape, radius):\n device_num, batch_size, rows, cols, n_channels = shape\n crow, ccol = int(rows / 2), int(cols / 2)\n center = [crow, ccol]\n x, y = jnp.ogrid[:rows, :cols]\n mask_area = (x - center[0]) ** 2 + (y - center[1]) ** 2 >= radius * radius\n mask = jnp.ones_like(mask_area)\n mask = jnp.where(mask_area, 0, mask)\n mask = mask.reshape(1, 1, rows, cols, 1)\n mask = jnp.repeat(mask, device_num, axis=0)\n mask = jnp.repeat(mask, batch_size, axis=1)\n mask = jnp.repeat(mask, n_channels, axis=4)\n return mask" }, { "identifier": "make_grid_image", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def make_grid_image(ndarray: Any, inverse_scaler: Callable, padding: int = 2, pad_value: float = 0.0) -> Any:\n \"\"\"Make a grid image from a Numpy Array.\n\n Args:\n ndarray: The Numpy Array.\n inverse_scaler: The inverse scaler.\n padding: The padding.\n pad_value: The padding value.\n\n Returns:\n The grid image.\n \"\"\"\n ndarray = jnp.asarray(ndarray)\n\n if ndarray.ndim == 4 and ndarray.shape[-1] == 1: # single-channel images\n ndarray = jnp.concatenate((ndarray, ndarray, ndarray), -1)\n\n n_row = int(np.sqrt(ndarray.shape[0]))\n # make the mini-batch of images into a grid\n n_maps = ndarray.shape[0]\n x_maps = min(n_row, n_maps)\n ymaps = int(math.ceil(float(n_maps) / x_maps))\n height, width = int(ndarray.shape[1] + padding), int(ndarray.shape[2] + padding)\n num_channels = ndarray.shape[3]\n grid = np.full((height * ymaps + padding, width * x_maps + padding, num_channels), pad_value).astype(np.float32)\n k = 0\n for y in range(ymaps):\n for x in range(x_maps):\n if k >= n_maps:\n break\n grid[\n y * height + padding : (y + 1) * height,\n x * width + padding : (x + 1) * width,\n ] = ndarray[k]\n k = k + 1\n\n ndarr = inverse_scaler(grid)\n ndarr = jnp.clip(ndarr * 255, 0, 255).astype(jnp.uint8)\n return ndarr" }, { "identifier": "process_images", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def process_images(images: Any) -> Any:\n \"\"\"Reshape images to the correct shape.\n\n Args:\n images: Tensor of images to reshape.\n\n Returns:\n A tensor of images with the correct shape.\n \"\"\"\n w = np.sqrt(images.shape[2]).astype(int)\n h = np.sqrt(images.shape[2]).astype(int)\n o = images.shape[3]\n return images.reshape(-1, w, h, o)" }, { "identifier": "save_samples", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def save_samples(round_num: int, samples: Any, file_path: str) -> None:\n \"\"\"Save samples to a file.\n\n Args:\n round_num: The round number of the evaluation.\n samples: Tensor of samples to save.\n file_path: string of the Path to the file where the samples will be saved.\n \"\"\"\n for i in range(samples.shape[0]):\n clean_path = os.path.join(file_path, f\"clean/samples_{round_num}_{i}.npy\")\n np.save(clean_path, samples[i])\n samples_path = os.path.join(file_path, 
f\"samples_{round_num}.npz\")\n with tf.io.gfile.GFile(samples_path, \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, samples=samples)\n f_out.write(io_buffer.getvalue())" }, { "identifier": "to_grayscale", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "@jax.pmap\ndef to_grayscale(images):\n weights = np.array([0.2989, 0.5870, 0.1140])[None, None, None, :] # Extend dimensions\n grayscale_images = np.sum(images * weights, axis=-1)\n return grayscale_images" }, { "identifier": "get_data_inverse_scaler", "path": "src/functional_diffusion_processes/utils/scaler.py", "snippet": "def get_data_inverse_scaler(is_centered: bool) -> Callable:\n \"\"\"Inverse data normalizer.\n\n Rescale data to original range at the end of the diffusion.\n\n Args:\n is_centered: boolean if True data will rescaled from [-1, 1] to [0, 1].\n \"\"\"\n if is_centered:\n # Rescale [-1, 1] to [0, 1]\n return lambda x: (x + 1.0) / 2.0\n else:\n return lambda x: x" }, { "identifier": "get_data_scaler", "path": "src/functional_diffusion_processes/utils/scaler.py", "snippet": "def get_data_scaler(is_centered: bool) -> Callable:\n \"\"\"Normalize data. Assume data are always in [0, 1].\n\n Args:\n is_centered: boolean if True data will be centered in [-1, 1].\n \"\"\"\n if is_centered:\n # Rescale to [-1, 1]\n return lambda x: x * 2.0 - 1.0\n else:\n return lambda x: x" }, { "identifier": "TrainState", "path": "src/functional_diffusion_processes/utils/training_state.py", "snippet": "class TrainState(train_state.TrainState):\n \"\"\"The training state for the model.\"\"\"\n\n opt_state_params: Any\n ema_params: Any\n rng: jax.random.PRNGKey" }, { "identifier": "colorizing_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def colorizing_fn(\n sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray, gray_scale_img: jnp.ndarray\n) -> Tuple:\n \"\"\"Perform colorizing task on a given grayscale image.\n\n Args:\n sample_fn (Callable): The sampling function used for colorization.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for colorization.\n gray_scale_img (jnp.ndarray): The grayscale image to be colorized.\n\n Returns:\n Tuple: The updated state and the colorized image.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params, gray_scale_img)" }, { "identifier": "construct_sampling_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def construct_sampling_fn(model: flax.linen.Module, sampler: Sampler) -> Callable:\n \"\"\"Construct a sampling function for generating samples from the model.\n\n Args:\n model (flax.linen.Module): The model instance from which to generate samples.\n sampler (Sampler): The sampler instance used for sampling.\n\n Returns:\n Callable: The constructed sampling function.\n \"\"\"\n predict_fn = model.make_predict_fn()\n if isinstance(model, BaseMAML):\n super_resolution_fn = model.make_super_resolution_fn()\n sample_fn = sampler.make_sampler(predict_fn, super_resolution_fn)\n else:\n sample_fn = sampler.make_sampler(predict_fn, None)\n return sample_fn" }, { "identifier": "construct_train_step", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def construct_train_step(optimizer, loss_fn) -> Callable:\n \"\"\"Construct a train step function to be used in the training loop.\n\n This function creates a training step function which, when called, performs\n a single 
step of training including forward pass, loss computation, and\n backward pass for gradient computation and updates.\n\n Args:\n optimizer: The optimizer instance used for updating model parameters.\n loss_fn: The loss function used for computing the loss.\n\n Returns:\n Callable: The constructed train step function.\n \"\"\"\n\n @partial(jax.pmap, axis_name=\"device\")\n def train_fn(\n rng,\n params,\n optim_params,\n step,\n batch_input,\n batch,\n ):\n grad_params, (new_rng, loss, loss_inner, batch_reconstructed, batch_corrupted, target) = loss_fn(\n rng, params, step, batch_input, batch\n )\n\n loss = jax.lax.pmean(loss, axis_name=\"device\")\n grad_params = jax.lax.pmean(grad_params, axis_name=\"device\")\n\n updates, optim_params = optimizer.update(grad_params, optim_params, params)\n\n params = optax.apply_updates(params, updates)\n params = clip_learning_rates(params)\n return new_rng, loss, loss_inner, params, optim_params, batch_reconstructed, batch_corrupted, target\n\n return train_fn" }, { "identifier": "inpainting_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def inpainting_fn(\n sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray, image: jnp.ndarray, mask: jnp.ndarray\n) -> Tuple:\n \"\"\"Perform inpainting task on a given image using a mask.\n\n Args:\n sample_fn (Callable): The sampling function used for inpainting.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for inpainting.\n image (jnp.ndarray): The image to be inpainted.\n mask (jnp.ndarray): The mask used for inpainting.\n\n Returns:\n Tuple: The updated state and the inpainted image.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params, image, mask)" }, { "identifier": "sampling_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def sampling_fn(sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray) -> Tuple:\n \"\"\"Perform sampling task using a given sampling function.\n\n Args:\n sample_fn (Callable): The sampling function.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for sampling.\n\n Returns:\n Tuple: The updated state after performing the sampling.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params)" } ]
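The construct_train_step snippet in the context above builds a jax.pmap-ed step that averages the loss and gradients across devices with jax.lax.pmean before applying optax updates. A stripped-down, hedged sketch of that data-parallel pattern with a toy linear model; the model, loss, and shapes are placeholders, not the repository's objects.

# Hedged sketch of the pmap + pmean pattern; not the repository's train step.
from functools import partial

import jax
import jax.numpy as jnp
import optax

optimizer = optax.adam(1e-3)

def toy_loss_fn(params, batch):
    pred = batch["x"] @ params["w"]
    return jnp.mean((pred - batch["y"]) ** 2)

@partial(jax.pmap, axis_name="device")
def train_step(params, opt_state, batch):
    loss, grads = jax.value_and_grad(toy_loss_fn)(params, batch)
    # Average across devices so every replica applies the same update.
    loss = jax.lax.pmean(loss, axis_name="device")
    grads = jax.lax.pmean(grads, axis_name="device")
    updates, opt_state = optimizer.update(grads, opt_state, params)
    params = optax.apply_updates(params, updates)
    return params, opt_state, loss

In the record's trainer the same idea is wrapped by construct_train_step and fed device-replicated parameters, as the cropped training loop further below shows.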
import abc import gc import io import logging import os import flax import flax.jax_utils as flax_utils import hydra.utils import jax import numpy as np import tensorflow as tf import wandb from typing import Any, Callable, Tuple, Union from cleanfid import fid from flax import linen, traverse_util from flax.training import checkpoints from flax.training.checkpoints import restore_checkpoint from jax import numpy as jnp from omegaconf import DictConfig, OmegaConf from tqdm.auto import tqdm from wandb.sdk.lib import RunDisabled from wandb.sdk.wandb_run import Run from ..datasets import AudioDataset, ImageDataset from ..datasets.base_dataset import BaseDataset from ..losses.base_loss import Loss from ..metrics import FIDMetric from ..samplers import Sampler from ..sdetools.base_sde import SDE from ..utils.common import filter_mask, make_grid_image, process_images, save_samples, to_grayscale from ..utils.scaler import get_data_inverse_scaler, get_data_scaler from ..utils.training_state import TrainState from .helpers import colorizing_fn, construct_sampling_fn, construct_train_step, inpainting_fn, sampling_fn
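Among the helpers imported above, get_data_scaler and get_data_inverse_scaler (shown in the context snippets) simply map data between [0, 1] and [-1, 1] when centering is enabled. A tiny hedged round-trip check of that behaviour:

# Hedged round-trip check of the centering scalers described in the context above.
import jax.numpy as jnp

scaler = lambda x: x * 2.0 - 1.0            # [0, 1] -> [-1, 1], as in get_data_scaler(is_centered=True)
inverse_scaler = lambda x: (x + 1.0) / 2.0  # [-1, 1] -> [0, 1], as in get_data_inverse_scaler(is_centered=True)

x = jnp.linspace(0.0, 1.0, 5)
assert jnp.allclose(inverse_scaler(scaler(x)), x)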
14,752
keep=np.inf, ) if self.logging.use_wandb: wandb_model_artifact_name = str(step_) + "_" + run.id wandb_model = wandb.Artifact(wandb_model_artifact_name, type="model") wandb_model.add_file(checkpoint_file) run.log_artifact(wandb_model) # noinspection PyProtectedMember def train(self, model: linen.Module, ds_train: BaseDataset, sde: SDE) -> None: """Train the model with optional evaluation and logging. This method encapsulates the entire training process including initialization, training loop, checkpointing, evaluation, and logging. It supports different sampling types like colorization, inpainting, super resolution, and deblurring. Args: model (linen.Module): The model to be trained. ds_train (BaseDataset): The training dataset. sde (SDE): Stochastic differential equation object, governing the dynamics for sampling. Raises: ValueError: If an unsupported dataset type is provided. Note: The method leverages the Weights & Biases (wandb) platform for logging and checkpointing, make sure it's configured properly if logging is enabled. """ run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input = self.initialize_run( model, ds_train, sde ) # `state.step` is JAX integer on the GPU/TPU devices start_step = int(state.step) rng = state.rng # Replicate the train state on all devices ( p_params, p_opt_state_params, p_step, p_ema_params, p_batch_input, ) = flax_utils.replicate( ( state.params, state.opt_state_params, state.step, state.ema_params, batch_input, ) ) # update the TrainState with replicated parameters and optimizer state state = state.replace( params=p_params, opt_state_params=p_opt_state_params, step=p_step, ema_params=p_ema_params, ) if jax.host_id() == 0: pylogger.info("Starting training loop at step %d." % (start_step,)) rng = jax.random.fold_in(rng, jax.host_id()) assert ( self.training_config.log_freq % self.training_config.n_jitted_steps == 0 and self.training_config.eval_freq % self.training_config.n_jitted_steps == 0 ), "Missing logs or checkpoints!" ds_train_iter = iter(ds_train) with tqdm( total=self.training_config.total_steps + 1, initial=start_step, position=0, leave=True, ) as pbar: for step in range( start_step, self.training_config.total_steps + 1, self.training_config.n_jitted_steps, ): # Get the next batch of data and scale it batch = jax.tree_map(f=lambda x: scaler(x._numpy()), tree=next(ds_train_iter)["data"]) if not self.training_config.sampling_only: # Split the random number generator for the current step rng, *next_rng = jax.random.split(key=rng, num=jax.local_device_count() + 1) next_rng = jnp.asarray(next_rng) ((_, state), batch_reconstructed, batch_corrupted, target) = self.train_step( train_step_fn=train_step_fn, carry_state=(next_rng, state), batch=batch, batch_input=p_batch_input, ) if not self.training_config.sampling_only and ( (jax.host_id() == 0 and step % self.training_config.checkpoint_freq == 0 and step != 0) ): self.save_checkpoint(step, run, state) # Evaluate the model if self.training_config.sampling and (step % self.training_config.eval_freq == 0): # if step != 0: if jax.host_id() == 0: pylogger.info("Generating samples at step %d." 
% (step,)) _, *sample_rng = jax.random.split(rng, jax.local_device_count() + 1) _, b, g, c = batch.shape sample_rng = jnp.asarray(sample_rng) if self.training_config.sampling_type == "full": batch_sampled, batch_sampled_last, batch_sampled_all = sampling_fn( sample_fn, (sample_rng, state), p_batch_input ) elif self.training_config.sampling_type == "colorization": batch_grayscale = to_grayscale(batch) batch_grayscale = batch_grayscale.reshape(-1, b, g, 1)
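The training loop in the snippet above keeps one host-level PRNG key and, on each step, splits off one sub-key per local device before calling the pmapped train step, while the train state itself is replicated across devices with flax.jax_utils.replicate. A minimal, self-contained sketch of that per-device RNG and replication pattern; per_device_step and the array shapes are illustrative placeholders, not part of the trainer:

import jax
import jax.numpy as jnp
from flax import jax_utils as flax_utils

# Hypothetical per-device step: consumes one PRNG key and one data shard per device.
@jax.pmap
def per_device_step(rng, batch):
    noise = jax.random.normal(rng, batch.shape)
    return batch + noise

rng = jax.random.PRNGKey(0)
n_dev = jax.local_device_count()
# One shard of data per device: leading axis is the device axis.
batch = jnp.zeros((n_dev, 4, 8))

# Keep one "carry" key and hand a fresh key to each device, as in the loop above.
rng, *device_rngs = jax.random.split(rng, num=n_dev + 1)
device_rngs = jnp.asarray(device_rngs)
out = per_device_step(device_rngs, batch)

# Replicated parameters get the same leading device axis.
params = {"w": jnp.ones((8,))}
p_params = flax_utils.replicate(params)          # each leaf becomes (n_dev, 8)
params_back = flax_utils.unreplicate(p_params)   # back to (8,)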
# import imageio # import imageio pylogger = logging.getLogger(__name__) class Trainer(abc.ABC): """Class for training a model.""" def __init__( self, mode: str, model_name: str, training_config: DictConfig, optimizer, evaluation_config: DictConfig, trainer_logging: DictConfig, sampler: Sampler, loss_obj: Loss, ) -> None: """Initialize a Trainer instance with configurations and core components. Args: mode (str): Specifies the mode of the trainer which can be either "train" or "eval". model_name (str): The name identifier for the model. training_config (DictConfig): A configuration dictionary for training settings. optimizer: The optimizer instance used for training. evaluation_config (DictConfig): A configuration dictionary for evaluation settings. trainer_logging (DictConfig): A configuration dictionary for logging settings. sampler (Sampler): A sampler instance for sampling from the model. loss_obj (Loss): A loss object used for computing the loss during training. """ self.mode = mode self.model_name = model_name self.training_config = training_config self.optimizer = hydra.utils.instantiate(optimizer) self.evaluation_config = evaluation_config self.logging = trainer_logging self.sampler = sampler self.loss_obj = loss_obj self.checkpoint_dir = os.path.join(self.training_config.save_dir, self.training_config.checkpoint_dir) self.sample_dir = os.path.join(self.training_config.save_dir, self.training_config.sample_dir) self.eval_dir = os.path.join(self.training_config.save_dir, self.evaluation_config.eval_dir) # Create the directories for saving samples and checkpoints tf.io.gfile.makedirs(self.checkpoint_dir) tf.io.gfile.makedirs(self.sample_dir) tf.io.gfile.makedirs(self.eval_dir) tf.io.gfile.makedirs(os.path.join(self.eval_dir, "clean")) def initialize_wandb( self, dataset_config: DictConfig, sde_config: DictConfig, model_config: DictConfig ) -> Union[Run, RunDisabled, None]: """Initialize wandb if logging is enabled.""" if self.logging.use_wandb: run = wandb.init( name=os.path.basename(self.logging.wandb_init.name), project=self.logging.wandb_init.project, entity=self.logging.wandb_init.entity, save_code=self.logging.wandb_init.save_code, config={ **self.training_config, **dataset_config, **sde_config, **model_config, }, ) else: run = None return run def initialize_run(self, model, ds_train, sde): """Perform all initialization steps required for training.""" run = self.initialize_wandb(ds_train.data_config, sde.sde_config, model.model_config) scaler = get_data_scaler(is_centered=ds_train.data_config.data_centered) inverse_scaler = get_data_inverse_scaler(is_centered=ds_train.data_config.data_centered) rng = jax.random.PRNGKey(seed=self.training_config.seed) rng, step_rng = jax.random.split(rng) batch_input = model.initialize_input( (ds_train.data_config.batch_size, *sde.sde_config.shape, ds_train.data_config.output_size) ) params = jax.jit(model.initialize_model, backend="cpu")(step_rng, batch_input) flat_params = traverse_util.flatten_dict(params).values() tot_params = sum([jnp.size(p) for p in flat_params]) pylogger.info("Total number of parameters: {:.2f}M".format(tot_params / 1e6)) state = TrainState.create( apply_fn=model.apply, params=params, tx=self.optimizer, opt_state_params=self.optimizer.init(params), rng=rng, ema_params=params, ) train_step_fn = construct_train_step(self.optimizer, self.loss_obj.construct_loss_fn(model)) sample_fn = construct_sampling_fn(model, self.sampler) # Resume training when intermediate checkpoints are detected if self.training_config.resume_training: 
pylogger.warning("Resuming training from the latest checkpoint.") if self.logging.use_wandb and self.model_name != "local": model_file = wandb.use_artifact(self.model_name).download() state = restore_checkpoint(ckpt_dir=model_file, prefix="checkpoint_", target=state) else: state = checkpoints.restore_checkpoint(ckpt_dir=self.checkpoint_dir, target=state) return run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input def train_step( self, train_step_fn: Callable, carry_state: Tuple, batch: jnp.ndarray, batch_input: jnp.ndarray, ) -> Tuple: """Perform a single training step, updating the model parameters. Args: train_step_fn (Callable): The train step function. carry_state (Tuple): The current state of the model and optimizer. batch (jnp.ndarray): The batch of data used for training. batch_input (jnp.ndarray): The input data to the model. Returns: Tuple: The updated state after performing the training step. """ (rng, state) = carry_state ( new_rng, loss, loss_inner, new_params, new_optim_state, batch_reconstructed, batch_corrupted, target, ) = train_step_fn( rng, state.params, state.opt_state_params, state.step, batch_input, batch, ) ema_rate = self.training_config.ema_rate new_params_ema = jax.tree_map( lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate), state.ema_params, new_params, ) # update the state new_state = state.replace( rng=flax.jax_utils.unreplicate(new_rng), step=state.step + 1, opt_state_params=new_optim_state, params=new_params, ema_params=new_params_ema, ) new_carry_state = (new_rng, new_state) loss = flax.jax_utils.unreplicate(loss) step = int(flax_utils.unreplicate(state.step)) # Log the training progress if jax.host_id() == 0 and step % self.training_config.log_freq == 0: pylogger.info("step: %d, training_loss: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, "loss": loss}, step=step) if loss_inner is not None: loss_inner = flax.jax_utils.unreplicate(loss_inner) for inner_step, loss in enumerate(loss_inner): pylogger.info("step: %d, training_loss_inner: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, f"loss inner step {inner_step}": loss}, step=step) return new_carry_state, batch_reconstructed, batch_corrupted, target def save_checkpoint(self, step, run, state): pylogger.info("Saving the model at step %d." % (step,)) # Log the evaluation progress # Save the model parameters ( params, opt_state_params, step_, ema_params, ) = flax_utils.unreplicate( ( state.params, state.opt_state_params, state.step, state.ema_params, ) ) saved_state = state.replace( step=step_, opt_state_params=opt_state_params, params=params, ema_params=ema_params, ) checkpoint_file = checkpoints.save_checkpoint( self.checkpoint_dir, saved_state, step=step_ // self.training_config.eval_freq, keep=np.inf, ) if self.logging.use_wandb: wandb_model_artifact_name = str(step_) + "_" + run.id wandb_model = wandb.Artifact(wandb_model_artifact_name, type="model") wandb_model.add_file(checkpoint_file) run.log_artifact(wandb_model) # noinspection PyProtectedMember def train(self, model: linen.Module, ds_train: BaseDataset, sde: SDE) -> None: """Train the model with optional evaluation and logging. This method encapsulates the entire training process including initialization, training loop, checkpointing, evaluation, and logging. It supports different sampling types like colorization, inpainting, super resolution, and deblurring. Args: model (linen.Module): The model to be trained. ds_train (BaseDataset): The training dataset. 
sde (SDE): Stochastic differential equation object, governing the dynamics for sampling. Raises: ValueError: If an unsupported dataset type is provided. Note: The method leverages the Weights & Biases (wandb) platform for logging and checkpointing, make sure it's configured properly if logging is enabled. """ run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input = self.initialize_run( model, ds_train, sde ) # `state.step` is JAX integer on the GPU/TPU devices start_step = int(state.step) rng = state.rng # Replicate the train state on all devices ( p_params, p_opt_state_params, p_step, p_ema_params, p_batch_input, ) = flax_utils.replicate( ( state.params, state.opt_state_params, state.step, state.ema_params, batch_input, ) ) # update the TrainState with replicated parameters and optimizer state state = state.replace( params=p_params, opt_state_params=p_opt_state_params, step=p_step, ema_params=p_ema_params, ) if jax.host_id() == 0: pylogger.info("Starting training loop at step %d." % (start_step,)) rng = jax.random.fold_in(rng, jax.host_id()) assert ( self.training_config.log_freq % self.training_config.n_jitted_steps == 0 and self.training_config.eval_freq % self.training_config.n_jitted_steps == 0 ), "Missing logs or checkpoints!" ds_train_iter = iter(ds_train) with tqdm( total=self.training_config.total_steps + 1, initial=start_step, position=0, leave=True, ) as pbar: for step in range( start_step, self.training_config.total_steps + 1, self.training_config.n_jitted_steps, ): # Get the next batch of data and scale it batch = jax.tree_map(f=lambda x: scaler(x._numpy()), tree=next(ds_train_iter)["data"]) if not self.training_config.sampling_only: # Split the random number generator for the current step rng, *next_rng = jax.random.split(key=rng, num=jax.local_device_count() + 1) next_rng = jnp.asarray(next_rng) ((_, state), batch_reconstructed, batch_corrupted, target) = self.train_step( train_step_fn=train_step_fn, carry_state=(next_rng, state), batch=batch, batch_input=p_batch_input, ) if not self.training_config.sampling_only and ( (jax.host_id() == 0 and step % self.training_config.checkpoint_freq == 0 and step != 0) ): self.save_checkpoint(step, run, state) # Evaluate the model if self.training_config.sampling and (step % self.training_config.eval_freq == 0): # if step != 0: if jax.host_id() == 0: pylogger.info("Generating samples at step %d." % (step,)) _, *sample_rng = jax.random.split(rng, jax.local_device_count() + 1) _, b, g, c = batch.shape sample_rng = jnp.asarray(sample_rng) if self.training_config.sampling_type == "full": batch_sampled, batch_sampled_last, batch_sampled_all = sampling_fn( sample_fn, (sample_rng, state), p_batch_input ) elif self.training_config.sampling_type == "colorization": batch_grayscale = to_grayscale(batch) batch_grayscale = batch_grayscale.reshape(-1, b, g, 1)
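train_step above maintains an exponential moving average of the parameters with a leaf-wise jax.tree_map update. The same update reduced to a standalone sketch; the parameter pytree and the rate below are made up for illustration:

import jax
import jax.numpy as jnp

def ema_update(ema_params, new_params, ema_rate=0.999):
    """Blend every leaf of the EMA pytree toward the freshly updated parameters."""
    # jax.tree_util.tree_map is the stable spelling of jax.tree_map used in the trainer.
    return jax.tree_util.tree_map(
        lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate),
        ema_params,
        new_params,
    )

ema = {"dense": {"kernel": jnp.ones((2, 2)), "bias": jnp.zeros((2,))}}
new = {"dense": {"kernel": jnp.full((2, 2), 2.0), "bias": jnp.ones((2,))}}
ema = ema_update(ema, new, ema_rate=0.9)
# kernel leaves are now 0.9 * 1.0 + 0.1 * 2.0 = 1.1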
batch_sampled, batch_sampled_last, batch_sampled_all = colorizing_fn(
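save_checkpoint in the code above pairs Flax's on-disk checkpointing with a Weights & Biases model artifact: flax.training.checkpoints.save_checkpoint returns the path of the file it wrote, which is then attached to a wandb.Artifact and logged on the run. A stripped-down sketch of that pairing; the directory name, keep count, and artifact naming below are placeholders:

import os
import wandb
from flax.training import checkpoints

def save_and_log(run, state, step, ckpt_dir="checkpoints"):
    """state is any serializable pytree, e.g. a flax TrainState."""
    ckpt_dir = os.path.abspath(ckpt_dir)
    # save_checkpoint returns the path of the checkpoint file it just wrote.
    ckpt_path = checkpoints.save_checkpoint(ckpt_dir, state, step=step, keep=3)
    artifact = wandb.Artifact(f"{step}_{run.id}", type="model")
    artifact.add_file(ckpt_path)
    run.log_artifact(artifact)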
15
2023-10-24 22:01:35+00:00
24k
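initialize_run builds a scaler/inverse-scaler pair via get_data_scaler and get_data_inverse_scaler, whose bodies are not shown in this excerpt. The sketch below is only an assumption about a common convention in diffusion codebases (map [0, 1] images to [-1, 1] when centering is requested), not the actual helpers:

def get_data_scaler(is_centered: bool):
    """Assumed behavior: map inputs from [0, 1] to [-1, 1] when centering is on."""
    if is_centered:
        return lambda x: x * 2.0 - 1.0
    return lambda x: x

def get_data_inverse_scaler(is_centered: bool):
    """Assumed inverse: map model outputs back to [0, 1]."""
    if is_centered:
        return lambda x: (x + 1.0) / 2.0
    return lambda x: x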
violet-sto/HN-GFN
main_mobo.py
[ { "identifier": "Dataset", "path": "dataset.py", "snippet": "class Dataset:\n\n def __init__(self, args, bpath, oracle, device):\n self.test_split_rng = np.random.RandomState(142857)\n self.train_rng = np.random.RandomState(int(time.time()))\n self.train_mols = []\n self.test_mols = []\n self.all_mols = []\n self.train_mols_map = {}\n\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(device, args.proxy_repr_type, include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n if args.floatX == 'float64':\n self.mdp.floatX = torch.double\n else:\n self.mdp.floatX = torch.float\n self.mdp._cue_max_blocks = args.max_blocks\n self.max_blocks = args.max_blocks\n self.oracle = oracle\n self._device = device\n self.seen_molecules = set()\n self.stop_event = threading.Event()\n\n self.target_norm = [-8.6, 1.10] # for dockerscore\n\n self.hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives)))\n\n def load_h5(self, path, test_ratio=0.1, num_init_examples=None):\n import json\n columns = [\"smiles\", \"dockscore\",\"blockidxs\", \"slices\", \"jbonds\", \"stems\"]\n store = pd.HDFStore(path, 'r')\n df = store.select('df')\n # Pandas has problem with calculating some stuff on float16\n df.dockscore = df.dockscore.astype(\"float64\")\n for cl_mame in columns[2:]:\n df.loc[:, cl_mame] = df[cl_mame].apply(json.loads)\n\n test_idxs = self.test_split_rng.choice(\n len(df), int(test_ratio * len(df)), replace=False)\n\n split_bool = np.zeros(len(df), dtype=np.bool)\n split_bool[test_idxs] = True\n self.scores = []\n self.smis = []\n for i in tqdm(range(len(df))):\n m = BlockMoleculeDataExtended()\n for c in range(1, len(columns)):\n setattr(m, columns[c], df.iloc[i, c - 1])\n m.blocks = [self.mdp.block_mols[i] for i in m.blockidxs]\n if len(m.blocks) > self.max_blocks:\n continue\n m.numblocks = len(m.blocks)\n m.score = self.oracle.get_score([m])\n self.scores.append(m.score)\n self.smis.append(m.smiles)\n self.all_mols.append(m)\n if split_bool[i]: \n self.test_mols.append(m)\n else:\n self.train_mols.append(m)\n if len(self.train_mols)+len(self.test_mols) >= num_init_examples:\n break\n store.close()\n\n print(\"Sampling initial {} molecules from all {} molecules...\".format(\n num_init_examples, len(split_bool)))\n print(len(self.train_mols), 'train mols')\n print(len(self.test_mols), 'test mols')\n\n def r2r(self, dockscore=None, normscore=None):\n if dockscore is not None:\n normscore = 4-(min(0, dockscore) -\n self.target_norm[0])/self.target_norm[1]\n normscore = max(0.1, normscore)\n return (normscore/1) ** 1\n\n def _get(self, i, dset):\n return [(dset[i], dset[i].score)]\n\n def sample(self, n):\n eidx = np.random.randint(0, len(self.train_mols), n)\n samples = sum((self._get(i, self.train_mols) for i in eidx), [])\n\n return zip(*samples)\n\n def sample2batch(self, mb):\n s, r = mb\n s = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in s])\n r = torch.tensor(pd.DataFrame.from_dict(\n r).values, device=self._device).float()\n return (s, r)\n\n def iterset(self, n, mode):\n if mode == 'test':\n dset = self.test_mols\n elif mode == 'train':\n dset = self.train_mols\n\n N = len(dset)\n for i in range(int(np.ceil(N/n))):\n samples = sum((self._get(j, dset)\n for j in range(i*n, min(N, (i+1)*n))), [])\n yield self.sample2batch(zip(*samples))\n\n def add_samples(self, batch):\n picked_mols, scores, picked_smis = batch\n\n for m in picked_mols:\n if np.random.uniform() < (1/10):\n self.test_mols.append(m)\n else:\n self.train_mols.append(m)\n 
self.all_mols.append(m)\n \n self.scores += scores\n self.smis += [smis[-1] for smis in picked_smis]\n \n self.stop_event.clear()\n\n def compute_hypervolume(self):\n scores = torch.tensor(pd.DataFrame.from_dict(self.scores).values)\n volume = self.hypervolume.compute(scores)\n\n return volume\n \n def start_samplers(self, n, mbsize):\n self.ready_events = [threading.Event() for i in range(n)]\n self.resume_events = [threading.Event() for i in range(n)]\n self.results = [None] * n\n def f(idx):\n while not self.stop_event.is_set():\n try:\n self.results[idx] = self.sample2batch(self.sample(mbsize))\n except Exception as e:\n print(\"Exception while sampling:\")\n print(e)\n self.sampler_threads[idx].failed = True\n self.sampler_threads[idx].exception = e\n self.ready_events[idx].set()\n break\n self.ready_events[idx].set()\n self.resume_events[idx].clear()\n self.resume_events[idx].wait()\n self.sampler_threads = [threading.Thread(target=f, args=(i,)) for i in range(n)]\n [setattr(i, 'failed', False) for i in self.sampler_threads]\n [i.start() for i in self.sampler_threads]\n round_robin_idx = [0]\n def get():\n while True:\n idx = round_robin_idx[0]\n round_robin_idx[0] = (round_robin_idx[0] + 1) % n\n if self.ready_events[idx].is_set():\n r = self.results[idx]\n self.ready_events[idx].clear()\n self.resume_events[idx].set()\n return r\n elif round_robin_idx[0] == 0:\n time.sleep(0.001)\n return get\n\n def stop_samplers_and_join(self):\n self.stop_event.set()\n if hasattr(self, 'sampler_threads'):\n while any([i.is_alive() for i in self.sampler_threads]):\n [i.set() for i in self.resume_events]\n [i.join(0.05) for i in self.sampler_threads]" }, { "identifier": "MolMDPExtended", "path": "mol_mdp_ext.py", "snippet": "class MolMDPExtended(MolMDP):\n\n def build_translation_table(self):\n \"\"\"build a symmetry mapping for blocks. Necessary to compute parent transitions\"\"\"\n self.translation_table = {}\n for blockidx in range(len(self.block_mols)):\n # Blocks have multiple ways of being attached. By default,\n # a new block is attached to the target stem by attaching\n # it's kth atom, where k = block_rs[new_block_idx][0].\n # When computing a reverse action (from a parent), we may\n # wish to attach the new block to a different atom. In\n # the blocks library, there are duplicates of the same\n # block but with block_rs[block][0] set to a different\n # atom. Thus, for the reverse action we have to find out\n # which duplicate this corresponds to.\n\n # Here, we compute, for block blockidx, what is the index\n # of the duplicate block, if someone wants to attach to\n # atom x of the block.\n # So atom_map[x] == bidx, such that block_rs[bidx][0] == x\n atom_map = {}\n for j in range(len(self.block_mols)):\n if self.block_smi[blockidx] == self.block_smi[j]:\n atom_map[self.block_rs[j][0]] = j\n self.translation_table[blockidx] = atom_map\n\n # We're still missing some \"duplicates\", as some might be\n # symmetric versions of each other. For example, block CC with\n # block_rs == [0,1] has no duplicate, because the duplicate\n # with block_rs [1,0] would be a symmetric version (both C\n # atoms are the \"same\").\n\n # To test this, let's create nonsense molecules by attaching\n # duplicate blocks to a Gold atom, and testing whether they\n # are the same.\n gold = Chem.MolFromSmiles('[Au]')\n # If we find that two molecules are the same when attaching\n # them with two different atoms, then that means the atom\n # numbers are symmetries. 
We can add those to the table.\n for blockidx in range(len(self.block_mols)):\n for j in self.block_rs[blockidx]:\n if j not in self.translation_table[blockidx]:\n symmetric_duplicate = None\n for atom, block_duplicate in self.translation_table[blockidx].items():\n molA, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,j]],\n frags=[gold, self.block_mols[blockidx]])\n molB, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,atom]],\n frags=[gold, self.block_mols[blockidx]])\n if (Chem.MolToSmiles(molA) == Chem.MolToSmiles(molB) or\n molA.HasSubstructMatch(molB)):\n symmetric_duplicate = block_duplicate\n break\n if symmetric_duplicate is None:\n raise ValueError('block', blockidx, self.block_smi[blockidx],\n 'has no duplicate for atom', j,\n 'in position 0, and no symmetrical correspondance')\n self.translation_table[blockidx][j] = symmetric_duplicate\n #print('block', blockidx, '+ atom', j,\n # 'in position 0 is a symmetric duplicate of',\n # symmetric_duplicate)\n\n def parents(self, mol=None):\n \"\"\"returns all the possible parents of molecule mol (or the current\n molecule if mol is None.\n\n Returns a list of (BlockMoleculeDataExtended, (block_idx, stem_idx)) pairs such that\n for a pair (m, (b, s)), MolMDPExtended.add_block_to(m, b, s) == mol.\n \"\"\"\n if len(mol.blockidxs) == 1:\n # If there's just a single block, then the only parent is\n # the empty block with the action that recreates that block\n return [(BlockMoleculeDataExtended(), (mol.blockidxs[0], 0))]\n\n # Compute the how many blocks each block is connected to\n blocks_degree = defaultdict(int)\n for a,b,_,_ in mol.jbonds:\n blocks_degree[a] += 1\n blocks_degree[b] += 1\n # Keep only blocks of degree 1 (those are the ones that could\n # have just been added)\n blocks_degree_1 = [i for i, d in blocks_degree.items() if d == 1]\n # Form new molecules without these blocks\n parent_mols = []\n\n for rblockidx in blocks_degree_1:\n new_mol = mol.copy()\n # find which bond we're removing\n removed_bonds = [(jbidx, bond) for jbidx, bond in enumerate(new_mol.jbonds)\n if rblockidx in bond[:2]]\n assert len(removed_bonds) == 1\n rjbidx, rbond = removed_bonds[0]\n # Pop the bond\n new_mol.jbonds.pop(rjbidx)\n # Remove the block\n mask = np.ones(len(new_mol.blockidxs), dtype=np.bool)\n mask[rblockidx] = 0\n reindex = new_mol.delete_blocks(mask)\n # reindex maps old blockidx to new blockidx, since the\n # block the removed block was attached to might have its\n # index shifted by 1.\n\n # Compute which stem the bond was using\n stem = ([reindex[rbond[0]], rbond[2]] if rblockidx == rbond[1] else\n [reindex[rbond[1]], rbond[3]])\n # and add it back\n new_mol.stems = [list(i) for i in new_mol.stems] + [stem]\n #new_mol.stems.append(stem)\n # and we have a parent. 
The stem idx to recreate mol is\n # the last stem, since we appended `stem` in the back of\n # the stem list.\n # We also have to translate the block id to match the bond\n # we broke, see build_translation_table().\n removed_stem_atom = (\n rbond[3] if rblockidx == rbond[1] else rbond[2])\n blockid = mol.blockidxs[rblockidx]\n if removed_stem_atom not in self.translation_table[blockid]:\n raise ValueError('Could not translate removed stem to duplicate or symmetric block.')\n parent_mols.append([new_mol,\n # action = (block_idx, stem_idx)\n (self.translation_table[blockid][removed_stem_atom],\n len(new_mol.stems) - 1)])\n if not len(parent_mols):\n raise ValueError('Could not find any parents')\n return parent_mols\n\n\n def add_block_to(self, mol, block_idx, stem_idx=None, atmidx=None):\n '''out-of-place version of add_block'''\n #assert (block_idx >= 0) and (block_idx <= len(self.block_mols)), \"unknown block\"\n if mol.numblocks == 0:\n stem_idx = None\n new_mol = mol.copy()\n new_mol.add_block(block_idx,\n block=self.block_mols[block_idx],\n block_r=self.block_rs[block_idx],\n stem_idx=stem_idx, atmidx=atmidx)\n return new_mol\n\n def remove_jbond_from(self, mol, jbond_idx=None, atmidx=None):\n new_mol = mol.copy()\n new_mol.remove_jbond(jbond_idx, atmidx)\n return new_mol\n\n def a2mol(self, acts):\n mol = BlockMoleculeDataExtended()\n for i in acts:\n if i[0] >= 0:\n mol = self.add_block_to(mol, *i)\n return mol\n\n def reset(self):\n self.molecule = BlockMoleculeDataExtended()\n return None\n\n\n def post_init(self, device, repr_type, include_bonds=False, include_nblocks=False):\n self.device = device\n self.repr_type = repr_type\n #self.max_bond_atmidx = max([max(i) for i in self.block_rs])\n self.max_num_atm = max(self.block_natm)\n # see model_block.mol2graph\n self.true_block_set = sorted(set(self.block_smi))\n self.stem_type_offset = np.int32([0] + list(np.cumsum([\n max(self.block_rs[self.block_smi.index(i)])+1 for i in self.true_block_set])))\n self.num_stem_types = self.stem_type_offset[-1]\n self.true_blockidx = [self.true_block_set.index(i) for i in self.block_smi]\n self.num_true_blocks = len(self.true_block_set)\n self.include_nblocks = include_nblocks\n self.include_bonds = include_bonds\n #print(self.max_num_atm, self.num_stem_types)\n self.molcache = {}\n\n def mols2batch(self, mols):\n if self.repr_type == 'block_graph':\n return model_block.mols2batch(mols, self)\n elif self.repr_type == 'atom_graph':\n return model_atom.mols2batch(mols, self)\n elif self.repr_type == 'morgan_fingerprint':\n return model_fingerprint.mols2batch(mols, self)\n\n def mol2repr(self, mol=None):\n if mol is None:\n mol = self.molecule\n #molhash = str(mol.blockidxs)+':'+str(mol.stems)+':'+str(mol.jbonds)\n #if molhash in self.molcache:\n # return self.molcache[molhash]\n if self.repr_type == 'block_graph':\n r = model_block.mol2graph(mol, self, self.floatX)\n elif self.repr_type == 'atom_graph':\n r = model_atom.mol2graph(mol, self, self.floatX,\n bonds=self.include_bonds,\n nblocks=self.include_nblocks)\n elif self.repr_type == 'morgan_fingerprint':\n r = model_fingerprint.mol2fp(mol, self, self.floatX)\n #self.molcache[molhash] = r\n return r\n\n def get_nx_graph(self, mol: BlockMoleculeData, true_block=False):\n true_blockidx = self.true_blockidx\n\n G = nx.DiGraph()\n blockidxs = [true_blockidx[xx] for xx in mol.blockidxs] if true_block else mol.blockidxs\n\n G.add_nodes_from([(ix, {\"block\": blockidxs[ix]}) for ix in range(len(blockidxs))])\n\n if len(mol.jbonds) > 0:\n edges = []\n 
for jbond in mol.jbonds:\n edges.append((jbond[0], jbond[1],\n {\"bond\": [jbond[2], jbond[3]]}))\n edges.append((jbond[1], jbond[0],\n {\"bond\": [jbond[3], jbond[2]]}))\n G.add_edges_from(edges)\n return G\n\n def graphs_are_isomorphic(self, g1, g2):\n return nx.algorithms.is_isomorphic(g1, g2, node_match=node_match, edge_match=edge_match)" }, { "identifier": "BlockMoleculeDataExtended", "path": "mol_mdp_ext.py", "snippet": "class BlockMoleculeDataExtended(BlockMoleculeData):\n\n @property\n def mol(self):\n return chem.mol_from_frag(jun_bonds=self.jbonds, frags=self.blocks)[0]\n\n @property\n def smiles(self):\n return Chem.MolToSmiles(self.mol)\n\n def copy(self): # shallow copy\n o = BlockMoleculeDataExtended()\n o.blockidxs = list(self.blockidxs)\n o.blocks = list(self.blocks)\n o.slices = list(self.slices)\n o.numblocks = self.numblocks\n o.jbonds = list(self.jbonds)\n o.stems = list(self.stems)\n return o\n\n def as_dict(self):\n return {'blockidxs': self.blockidxs,\n 'slices': self.slices,\n 'numblocks': self.numblocks,\n 'jbonds': self.jbonds,\n 'stems': self.stems}" }, { "identifier": "Oracle", "path": "oracle/oracle.py", "snippet": "class Oracle():\n def __init__(self, args, mols_ref=None):\n '''\n @params:\n args (dict): argsurations\n '''\n self.objectives = args.objectives\n self.fps_ref = [AllChem.GetMorganFingerprintAsBitVect(x, 3, 2048) \n for x in mols_ref] if mols_ref else None\n self.device = torch.device(args.device)\n\n def batch_get_scores(self, mols):\n '''\n @params:\n mols: molecules to estimate score\n @return:\n dicts (list): list of score dictionaries\n '''\n dicts = [{} for _ in mols]\n for obj in self.objectives:\n scores = get_scores(obj, mols, device=self.device)\n for i, mol in enumerate(mols):\n dicts[i][obj] = scores[i]\n return dicts\n \n def get_score(self, mol):\n scores = {}\n for obj in self.objectives:\n score = get_scores(obj, mol, device=self.device)\n scores[obj] = score[0]\n \n return scores" }, { "identifier": "get_proxy", "path": "proxy/proxy.py", "snippet": "def get_proxy(args, bpath, oracle):\n if args.acq_fn.lower() == 'none':\n return NoAF(args, bpath, oracle)\n\n elif args.acq_fn.lower() == 'ucb':\n return UCB(args, bpath, oracle)\n \n elif args.acq_fn.lower() == 'ucb_chebyshev':\n return UCB_chebyshev(args, bpath, oracle)\n\n elif args.acq_fn.lower() == 'ei':\n return EI(args, bpath, oracle)" }, { "identifier": "FMGFlowNet", "path": "generator/gfn.py", "snippet": "class FMGFlowNet(nn.Module):\n def __init__(self, args, bpath):\n super().__init__()\n self.args = args\n mdp = MolMDPExtended(bpath)\n mdp.post_init(args.device, args.repr_type,\n include_nblocks=args.include_nblocks)\n mdp.build_translation_table()\n self.model = make_model(args, mdp, is_proxy=False)\n self.opt = torch.optim.Adam(self.model.parameters(\n ), args.learning_rate, weight_decay=args.weight_decay)\n\n self.loginf = 1000 # to prevent nans\n self.log_reg_c = args.log_reg_c\n self.balanced_loss = args.balanced_loss\n self.do_nblocks_reg = False\n self.max_blocks = args.max_blocks\n self.leaf_coef = args.leaf_coef\n self.clip_grad = args.clip_grad\n # self.score_criterion = nn.MSELoss(reduction='none')\n self.score_criterion = nn.MSELoss()\n\n def forward(self, graph_data, vec_data=None, do_stems=True):\n return self.model(graph_data, vec_data, do_stems)\n\n def train_step(self, p, pb, a, pw, w, r, s, d, mols, i):\n loss, term_loss, flow_loss = self.FMLoss(p, pb, a, pw, w, r, s, d)\n\n self.opt.zero_grad()\n loss.backward()\n if self.clip_grad > 0:\n 
torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.clip_grad)\n self.opt.step()\n self.model.training_steps = i+1\n \n return (loss.item(), term_loss.item(), flow_loss.item())\n\n def FMLoss(self, p, pb, a, pw, w, r, s, d):\n # Since we sampled 'mbsize' trajectories, we're going to get\n # roughly mbsize * H (H is variable) transitions\n ntransitions = r.shape[0]\n # state outputs\n stem_out_s, mol_out_s = self.model(s, w) # log(F)\n # parents of the state outputs\n stem_out_p, mol_out_p = self.model(p, pw)\n # index parents by their corresponding actions\n qsa_p = self.model.index_output_by_action(\n p, stem_out_p, mol_out_p[:, 0], a)\n # then sum the parents' contribution, this is the inflow\n exp_inflow = (torch.zeros((ntransitions,), device=qsa_p.device, dtype=qsa_p.dtype)\n .index_add_(0, pb, torch.exp(qsa_p))) # pb is the parents' batch index\n inflow = torch.log(exp_inflow + self.log_reg_c)\n # sum the state's Q(s,a), this is the outflow\n exp_outflow = self.model.sum_output(s, torch.exp(\n stem_out_s), torch.exp(mol_out_s[:, 0]))\n # include reward and done multiplier, then take the log\n # we're guarenteed that r > 0 iff d = 1, so the log always works\n outflow_plus_r = torch.log(self.log_reg_c + r + exp_outflow * (1-d))\n if self.do_nblocks_reg:\n losses = _losses = ((inflow - outflow_plus_r) /\n (s.nblocks * self.max_blocks)).pow(2)\n else:\n losses = _losses = (inflow - outflow_plus_r).pow(2)\n\n term_loss = (losses * d).sum() / (d.sum() + 1e-20) # terminal nodes\n flow_loss = (losses * (1-d)).sum() / \\\n ((1-d).sum() + 1e-20) # non-terminal nodes\n \n if self.balanced_loss:\n loss = term_loss * self.leaf_coef + flow_loss\n else:\n loss = losses.mean()\n\n return loss, term_loss, flow_loss" }, { "identifier": "TBGFlowNet", "path": "generator/gfn.py", "snippet": "class TBGFlowNet(nn.Module):\n def __init__(self, args, bpath):\n super().__init__()\n self.args = args\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(args.device, args.repr_type,\n include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n self.model = make_model(args, self.mdp, is_proxy=False)\n self.Z = nn.Sequential(nn.Linear(len(args.objectives), args.nemb//2), nn.LeakyReLU(),\n nn.Linear(args.nemb//2, 1))\n self.Z.to(args.device)\n self.opt = torch.optim.Adam(self.model.parameters(), args.learning_rate, weight_decay=args.weight_decay)\n self.opt_Z = torch.optim.Adam(self.Z.parameters(), args.Z_learning_rate, weight_decay=args.weight_decay)\n\n def forward(self, graph_data, vec_data=None, do_stems=True):\n return self.model(graph_data, vec_data, do_stems)\n\n def train_step(self, p, pb, a, pw, w, r, s, d, mols, i):\n loss = self.TBLoss(p, a, w, r, d, mols)\n self.opt.zero_grad()\n self.opt_Z.zero_grad()\n loss.backward()\n if self.args.clip_grad > 0:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.args.clip_grad)\n self.opt.step()\n self.opt_Z.step()\n\n return (loss.item(),)\n\n @property\n def Z(self):\n return self.model.Z\n\n def TBLoss(self, p, a, w, r, d, mols):\n # logit\n stem_out_p, mol_out_p = self.model(p, w)\n # index parents by their corresponding actions\n logits = -self.model.action_negloglikelihood(\n p, a, stem_out_p, mol_out_p)\n\n b = torch.cat([torch.tensor([0], device=logits.device),\n torch.cumsum(d.long(), 0)[:-1]], dim=0)\n n = torch.tensor([len(self.mdp.parents(mol)) if a[idx, 0].item() != -1 else 1.\n for idx, mol in enumerate(mols[1])], device=logits.device)\n # n = torch.tensor([len(self.mdp.parents(mol)) for mol in mols[1]], 
device=logits.device)\n forward_ll = scatter(logits, b, reduce='sum')\n backward_ll = scatter(torch.log(1/n), b, reduce='sum')\n\n losses = ((self.Z(w[d==1.]) + forward_ll) - (torch.log(r[d == 1.]) + backward_ll)).pow(2) \n loss = losses.mean()\n\n return loss" }, { "identifier": "MOReinforce", "path": "generator/gfn.py", "snippet": "class MOReinforce(TBGFlowNet):\n def TBLoss(self, p, a, w, r, d, mols):\n # logit\n stem_out_p, mol_out_p = self.model(p, w)\n # index parents by their corresponding actions\n logits = -self.model.action_negloglikelihood(\n p, a, stem_out_p, mol_out_p)\n\n b = torch.cat([torch.tensor([0], device=logits.device),\n torch.cumsum(d.long(), 0)[:-1]], dim=0)\n n = torch.tensor([len(self.mdp.parents(mol)) if a[idx, 0].item() != -1 else 1.\n for idx, mol in enumerate(mols[1])], device=logits.device)\n # n = torch.tensor([len(self.mdp.parents(mol)) for mol in mols[1]], device=logits.device)\n forward_ll = scatter(logits, b, reduce='sum')\n\n rewards = r[d == 1.]\n losses = forward_ll * (-rewards - (-1) * rewards.mean())\n loss = losses.mean()\n\n return loss" }, { "identifier": "set_random_seed", "path": "utils/utils.py", "snippet": "def set_random_seed(seed, deterministic=True):\n \"\"\"Set random seed.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "compute_success", "path": "utils/metrics.py", "snippet": "def compute_success(mols, scores, objectives, score_succ):\n print(\"Computing successful rate...\")\n positive_mols = []\n success_dict = {k: 0. for k in objectives}\n\n for mol, score in zip(mols, scores):\n all_success = True\n for k, v in score.items():\n if v >= score_succ[k]:\n success_dict[k] += 1\n else:\n all_success = False\n if all_success:\n positive_mols.append(mol)\n\n success = 1.*len(positive_mols)/len(mols)\n\n return success, positive_mols" }, { "identifier": "compute_diversity", "path": "utils/metrics.py", "snippet": "def compute_diversity(mols):\n print(\"Computing diversity...\")\n\n if len(mols) == 0:\n return 0\n\n sims = []\n fps = [AllChem.GetMorganFingerprintAsBitVect(x.mol, 3, 2048) for x in mols]\n for i in range(len(fps)):\n sims += DataStructs.BulkTanimotoSimilarity(fps[i], fps[:i])\n\n return 1 - np.mean(sims)" }, { "identifier": "compute_novelty", "path": "utils/metrics.py", "snippet": "def compute_novelty(mols, ref_mols):\n print(\"Computing novelty...\")\n positive_fps = [AllChem.GetMorganFingerprintAsBitVect(\n x.mol, 3, 2048) for x in mols]\n ref_fps = [AllChem.GetMorganFingerprintAsBitVect(\n x, 3, 2048) for x in ref_mols]\n\n n_sim = 0.\n for i in range(len(positive_fps)):\n sims = DataStructs.BulkTanimotoSimilarity(positive_fps[i], ref_fps)\n if max(sims) >= 0.4:\n n_sim += 1\n novelty = 1. - 1. 
* n_sim / (len(positive_fps)+1e-6)\n\n return novelty" }, { "identifier": "compute_correlation", "path": "utils/metrics.py", "snippet": "def compute_correlation(args, model, rollout_worker, test_mols):\n\n mdp = rollout_worker.mdp\n device = args.device\n def tf(x): return torch.tensor(x, device=device).to(torch.float)\n def tint(x): return torch.tensor(x, device=device).long()\n\n # test_mols = pickle.load(gzip.open('data/some_mols_U_1k.pkl.gz'))\n logsoftmax = nn.LogSoftmax(0)\n corrs = []\n numblocks = []\n\n start_time = time.time()\n if args.n_objectives == 3:\n test_weights = rollout_worker.test_weights[::2]\n elif args.n_objectives == 4:\n test_weights = rollout_worker.test_weights[1:-2:4]\n else:\n test_weights = rollout_worker.test_weights\n \n for weights in test_weights:\n print(\"Computing correlation w.r.t test weights {}\".format(weights))\n weights = torch.tensor(weights).to(args.device)\n logp = []\n rewards = []\n for m in tqdm(test_mols):\n try:\n agraph = get_mol_path_graph(m, mdp)\n except:\n continue\n # rewards.append(np.log(moli[0][0]))\n reward = rollout_worker._get_reward(m, weights)[0].item()\n rewards.append(np.log(reward))\n s = mdp.mols2batch([mdp.mol2repr(agraph.nodes[i]['mol'])\n for i in agraph.nodes])\n numblocks.append(len(m.blocks))\n with torch.no_grad():\n # get the mols_out_s for ALL molecules not just the end one.\n if args.condition_type == 'Hyper_scorepred':\n stem_out_s, mol_out_s, _ = model(\n s, weights.repeat(s.num_graphs, 1))\n else:\n stem_out_s, mol_out_s = model(\n s, weights.repeat(s.num_graphs, 1))\n per_mol_out = []\n # Compute pi(a|s)\n for j in range(len(agraph.nodes)):\n a, b = s._slice_dict['stems'][j:j+2]\n\n stop_allowed = len(\n agraph.nodes[j]['mol'].blocks) >= args.min_blocks\n mp = logsoftmax(torch.cat([\n stem_out_s[a:b].reshape(-1),\n # If num_blocks < min_blocks, the model is not allowed to stop\n mol_out_s[j, :1] if stop_allowed else tf([-1000])]))\n per_mol_out.append(\n (mp[:-1].reshape((-1, stem_out_s.shape[1])), mp[-1]))\n\n # When the model reaches 8 blocks, it is stopped automatically. 
If instead it stops before\n # that, we need to take into account the STOP action's logprob\n if len(m.blocks) < 8:\n if args.condition_type == 'Hyper_scorepred':\n stem_out_last, mol_out_last, _ = model(\n mdp.mols2batch([mdp.mol2repr(m)]), weights.unsqueeze(0))\n else:\n stem_out_last, mol_out_last = model(\n mdp.mols2batch([mdp.mol2repr(m)]), weights.unsqueeze(0)) \n mplast = logsoftmax(\n torch.cat([stem_out_last.reshape(-1), mol_out_last[0, :1]]))\n MSTOP = mplast[-1]\n\n # assign logprob to edges\n for u, v in agraph.edges:\n a = agraph.edges[u, v]['action']\n if a[0] == -1:\n agraph.edges[u, v]['logprob'] = per_mol_out[v][1]\n else:\n agraph.edges[u,\n v]['logprob'] = per_mol_out[v][0][a[1], a[0]]\n\n # propagate logprobs through the graph\n for n in list(nx.topological_sort(agraph))[::-1]:\n for c in agraph.predecessors(n):\n if len(m.blocks) < 8 and c == 0:\n agraph.nodes[c]['logprob'] = torch.logaddexp(\n agraph.nodes[c].get('logprob', tf(-1000)),\n agraph.edges[c, n]['logprob'] + agraph.nodes[n].get('logprob', 0) + MSTOP)\n else:\n agraph.nodes[c]['logprob'] = torch.logaddexp(\n agraph.nodes[c].get('logprob', tf(-1000)),\n agraph.edges[c, n]['logprob'] + agraph.nodes[n].get('logprob', 0))\n\n # add the first item\n # logp.append((moli, agraph.nodes[n]['logprob'].item()))\n logp.append(agraph.nodes[n]['logprob'].item())\n corrs.append(stats.spearmanr(rewards, logp).correlation)\n\n print('Spearmanr: {}, mean: {}, Time: {}'.format(corrs, np.mean(corrs), time.time()-start_time))\n return corrs" }, { "identifier": "circle_points", "path": "utils/metrics.py", "snippet": "def circle_points(K, min_angle=None, max_angle=None):\n # generate evenly distributed preference vector\n ang0 = 1e-6 if min_angle is None else min_angle\n ang1 = np.pi / 2 - ang0 if max_angle is None else max_angle\n angles = np.linspace(ang0, ang1, K, endpoint=True)\n x = np.cos(angles)\n y = np.sin(angles)\n weights = np.c_[x, y]\n normalized_weights = weights/weights.sum(1, keepdims=True)\n\n return normalized_weights.astype(np.float32)" }, { "identifier": "get_logger", "path": "utils/logging.py", "snippet": "def get_logger(args):\n if args.enable_tensorboard:\n return TensorboardLogger(args)\n else:\n return Logger(args)" }, { "identifier": "RolloutWorker", "path": "main.py", "snippet": "class RolloutWorker:\n def __init__(self, args, bpath, proxy, device):\n self.args = args\n self.test_split_rng = np.random.RandomState(142857)\n self.train_rng = np.random.RandomState(int(time.time()))\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(device, args.repr_type,\n include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n if args.floatX == 'float64':\n self.mdp.floatX = self.floatX = torch.double\n else:\n self.mdp.floatX = self.floatX = torch.float\n self.proxy = proxy\n self._device = device\n self.seen_molecules = set()\n self.stop_event = threading.Event()\n #######\n # This is the \"result\", here a list of (reward, BlockMolDataExt, info...) 
tuples\n self.sampled_mols = []\n self.online_mols = []\n self.hindsight_mols = []\n self.max_online_mols = 1000\n self.max_hindsight_mols = 1000\n\n self.min_blocks = args.min_blocks\n self.max_blocks = args.max_blocks\n self.mdp._cue_max_blocks = self.max_blocks\n self.reward_exp = args.reward_exp\n self.reward_min = args.reward_min\n self.reward_norm = args.reward_norm\n self.reward_exp_ramping = args.reward_exp_ramping\n self.random_action_prob = args.random_action_prob\n\n # If True this basically implements Buesing et al's TreeSample Q,\n # samples uniformly from it though, no MTCS involved\n if args.criterion == 'TB' or args.criterion == \"Reinforce\":\n self.ignore_parents = True\n elif args.criterion == 'FM':\n self.ignore_parents = False\n\n def rollout(self, generator, use_rand_policy=True, weights=None, replay=False):\n weights = Dirichlet(torch.ones(len(self.args.objectives))*self.args.alpha).sample_n(1).to(\n self.args.device) if weights is None else weights\n\n m = BlockMoleculeDataExtended()\n samples = []\n max_blocks = self.max_blocks\n trajectory_stats = []\n for t in range(max_blocks):\n s = self.mdp.mols2batch([self.mdp.mol2repr(m)])\n s_o, m_o = generator(s, vec_data=weights, do_stems=True)\n # fix from run 330 onwards\n if t < self.min_blocks:\n m_o = m_o*0 - 1000 # prevent assigning prob to stop\n # when we can't stop\n ##\n logits = torch.cat([m_o.reshape(-1), s_o.reshape(-1)])\n cat = torch.distributions.Categorical(\n logits=logits) \n action = cat.sample().item()\n\n if use_rand_policy and self.random_action_prob > 0: # just for training\n if self.train_rng.uniform() < self.random_action_prob:\n action = self.train_rng.randint(\n int(t < self.min_blocks), logits.shape[0])\n\n q = torch.cat([m_o.reshape(-1), s_o.reshape(-1)])\n trajectory_stats.append(\n (q[action].item(), action, torch.logsumexp(q, 0).item()))\n\n if t >= self.min_blocks and action == 0:\n r, raw_r = self._get_reward(m, weights) # r: reward, raw_r: scores for the objectives\n samples.append(((m,), ((-1, 0),), weights, weights, r, m, 1))\n break\n else:\n action = max(0, action-1)\n action = (action % self.mdp.num_blocks,\n action // self.mdp.num_blocks)\n m_old = m\n m = self.mdp.add_block_to(m, *action)\n if len(m.blocks) and not len(m.stems) or t == max_blocks - 1:\n # can't add anything more to this mol so let's make it\n # terminal. 
Note that this node's parent isn't just m,\n # because this is a sink for all parent transitions\n r, raw_r = self._get_reward(m, weights)\n if self.ignore_parents:\n samples.append(\n ((m_old,), (action,), weights, weights, r, m, 1))\n else:\n parents, actions = zip(*self.mdp.parents(m))\n samples.append((parents, actions, weights.repeat(\n len(parents), 1), weights, r, m, 1))\n break\n else:\n if self.ignore_parents:\n samples.append(\n ((m_old,), (action,), weights, weights, 0, m, 0))\n else:\n parents, actions = zip(*self.mdp.parents(m))\n samples.append(\n (parents, actions, weights.repeat(len(parents), 1), weights, 0, m, 0))\n\n p = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in samples[-1][0]])\n qp = generator(p, weights.repeat(p.num_graphs, 1))\n qsa_p = generator.model.index_output_by_action(\n p, qp[0], qp[1][:, 0],\n torch.tensor(samples[-1][1], device=self._device).long())\n inflow = torch.logsumexp(qsa_p.flatten(), 0).item()\n self.sampled_mols.append(\n ([i.cpu().numpy() for i in raw_r], weights.cpu().numpy(), m, trajectory_stats, inflow))\n\n if replay and self.args.hindsight_prob > 0.0:\n self._add_mol_to_replay(m)\n\n return samples\n\n def _get_reward(self, m, weights=None):\n rdmol = m.mol\n if rdmol is None:\n return self.reward_min\n \n # get scores from oracle\n score = self.proxy.get_score([m])\n score = torch.tensor(list(score.values())).to(self.args.device)\n \n if self.args.scalar == 'WeightedSum':\n raw_reward = (weights*score).sum()\n \n elif self.args.scalar == 'Tchebycheff':\n raw_reward = (weights*score).min() + 0.1 * (weights*score).sum()\n \n reward = self.l2r(raw_reward.clip(self.reward_min))\n return reward, (raw_reward, score)\n\n def execute_train_episode_batch(self, generator, dataset=None, use_rand_policy=True):\n if self.args.condition_type is None:\n weights = self.test_weights # train specific model\n else:\n weights = Dirichlet(torch.tensor(self.args.alpha_vector)*self.args.alpha).sample_n(1).to(self.args.device) #* sample weights per batch, seem better\n samples = sum((self.rollout(generator, use_rand_policy, weights)\n for i in range(self.args.trajectories_mbsize)), [])\n\n return zip(*samples)\n\n def sample2batch(self, mb):\n p, a, p_weights, weights, r, s, d, *o = mb\n mols = (p, s)\n # The batch index of each parent\n p_batch = torch.tensor(sum([[i]*len(p) for i, p in enumerate(p)], []),\n device=self._device).long()\n # Convert all parents and states to repr. 
Note that this\n # concatenates all the parent lists, which is why we need\n # p_batch\n p = self.mdp.mols2batch(list(map(self.mdp.mol2repr, sum(p, ()))))\n s = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in s])\n # Concatenate all the actions (one per parent per sample)\n a = torch.tensor(sum(a, ()), device=self._device).long()\n # rewards and dones\n r = torch.tensor(r, device=self._device).to(self.floatX)\n d = torch.tensor(d, device=self._device).to(self.floatX)\n # weights\n p_w = torch.cat(p_weights, 0)\n w = torch.cat(weights, 0)\n return (p, p_batch, a, p_w, w, r, s, d, mols, *o)\n\n def l2r(self, raw_reward, t=0):\n if self.reward_exp_ramping > 0:\n reward_exp = 1 + (self.reward_exp - 1) * \\\n (1 - 1/(1 + t / self.reward_exp_ramping))\n # when t=0, exp = 1; t->∞, exp = self.reward_exp\n else:\n reward_exp = self.reward_exp\n\n reward = (raw_reward/self.reward_norm)**reward_exp\n\n return reward\n\n def start_samplers(self, generator, n, dataset):\n self.ready_events = [threading.Event() for i in range(n)]\n self.resume_events = [threading.Event() for i in range(n)]\n self.results = [None] * n\n\n def f(idx):\n while not self.stop_event.is_set():\n try:\n self.results[idx] = self.sample2batch(\n self.execute_train_episode_batch(generator, dataset, use_rand_policy=True))\n except Exception as e:\n print(\"Exception while sampling:\")\n print(e)\n self.sampler_threads[idx].failed = True\n self.sampler_threads[idx].exception = e\n self.ready_events[idx].set()\n break\n self.ready_events[idx].set()\n self.resume_events[idx].clear()\n self.resume_events[idx].wait()\n\n self.sampler_threads = [threading.Thread(\n target=f, args=(i,)) for i in range(n)]\n [setattr(i, 'failed', False) for i in self.sampler_threads]\n [i.start() for i in self.sampler_threads]\n round_robin_idx = [0]\n\n def get():\n while True:\n idx = round_robin_idx[0]\n round_robin_idx[0] = (round_robin_idx[0] + 1) % n\n if self.ready_events[idx].is_set():\n r = self.results[idx]\n self.ready_events[idx].clear()\n self.resume_events[idx].set()\n return r\n elif round_robin_idx[0] == 0:\n time.sleep(0.001)\n return get\n\n def stop_samplers_and_join(self):\n self.stop_event.set()\n if hasattr(self, 'sampler_threads'):\n while any([i.is_alive() for i in self.sampler_threads]):\n [i.set() for i in self.resume_events]\n [i.join(0.05) for i in self.sampler_threads]" }, { "identifier": "get_test_mols", "path": "main.py", "snippet": "def get_test_mols(args, mdp, num):\n samples = []\n fps = []\n early_stops = []\n while len(samples) < num:\n if len(samples) % 5000 == 0:\n print(f'{len(samples)}/{num} mols have been sampled')\n m = BlockMoleculeDataExtended()\n min_blocks = args.min_blocks\n max_blocks = args.max_blocks\n early_stop_at = np.random.randint(min_blocks, max_blocks + 1)\n early_stops.append(early_stop_at)\n for t in range(max_blocks):\n if t == 0:\n length = mdp.num_blocks+1\n else:\n length = len(m.stems)*mdp.num_blocks+1\n\n action = np.random.randint(1, length)\n\n if t == early_stop_at:\n action = 0\n\n if t >= min_blocks and action == 0:\n fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048)\n if len(samples)==0:\n samples.append(m)\n fps.append(fp)\n else:\n sims = DataStructs.BulkTanimotoSimilarity(fp, fps)\n if max(sims) < 0.7:\n samples.append(m)\n fps.append(fp)\n break\n else:\n action = max(0, action-1)\n action = (action % mdp.num_blocks, action // mdp.num_blocks)\n #print('..', action)\n m = mdp.add_block_to(m, *action)\n if len(m.blocks) and not len(m.stems) or t == max_blocks - 1:\n # 
can't add anything more to this mol so let's make it\n # terminal. Note that this node's parent isn't just m,\n # because this is a sink for all parent transitions\n fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048)\n if len(samples)==0:\n samples.append(m)\n fps.append(fp)\n else:\n sims = DataStructs.BulkTanimotoSimilarity(fp, fps)\n if max(sims) < 0.7:\n samples.append(m)\n fps.append(fp)\n break\n \n return samples" } ]
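Several of the snippets above (Dataset.compute_hypervolume in the context, and sample_batch in the code below) score a set of molecules by the hypervolume their objective vectors dominate relative to a zero reference point, using botorch's Hypervolume. A minimal usage sketch with made-up scores for two objectives:

import torch
from botorch.utils.multi_objective.hypervolume import Hypervolume

# Rows are molecules, columns are objectives (two here), scaled to [0, 1].
scores = torch.tensor([
    [0.8, 0.2],
    [0.5, 0.6],
    [0.2, 0.9],
])

hv = Hypervolume(ref_point=torch.zeros(scores.shape[1]))
volume = hv.compute(scores)
print(f"hypervolume w.r.t. the origin: {volume:.3f}")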
from collections import defaultdict
from dataset import Dataset
from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended
from oracle.oracle import Oracle
from proxy import get_proxy
from generator import TBGFlowNet, FMGFlowNet, MOReinforce
from utils.utils import set_random_seed
from utils.metrics import compute_success, compute_diversity, compute_novelty, compute_correlation, circle_points
from utils.logging import get_logger
from datetime import datetime
from botorch.utils.multi_objective.hypervolume import Hypervolume
from botorch.utils.sampling import sample_simplex
from botorch.utils.transforms import normalize, unnormalize
from torch.distributions.dirichlet import Dirichlet
from main import RolloutWorker, get_test_mols
from pymoo.util.ref_dirs import get_reference_directions
from copy import deepcopy
import random
import os
import re
import argparse
import json
import time
import threading
import pdb
import pickle
import gzip
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch
import pandas as pd
import numpy as np
import warnings
15,493
assert len(picked_mols) == args.num_samples top_means = torch.tensor(means) scores_dict = oracle.batch_get_scores(picked_mols) scores = torch.tensor(pd.DataFrame.from_dict(scores_dict).values) test_loss = F.mse_loss(top_means, scores) hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives))) volume = hypervolume.compute(top_means) volume_oracle = hypervolume.compute(scores) diversity = compute_diversity(picked_mols) batch_metrics = {'Hypervolume_reward': volume, 'Hypervolume_oracle': volume_oracle, 'Reward_mean': raw_rewards_mean, 'scores_max': pd.DataFrame.from_dict(scores_dict).max().to_dict(), 'scores_mean': pd.DataFrame.from_dict(scores_dict).mean().to_dict(), 'Test_loss': test_loss, 'Diversity': diversity} print(batch_metrics) print('Time: {}'.format(time.time()-time_start)) if not compute_multi_objective_metric: return volume, volume_oracle, raw_rewards_weight, raw_rewards_mean, test_loss, diversity else: for i in range(len(picked_mols)): picked_mols[i].score = scores_dict[i] # success/diversity/novelty is computed among the top mols. success, positive_mols = compute_success( picked_mols, scores_dict, args.objectives, score_succ) succ_diversity = compute_diversity(positive_mols) if ref_mols: novelty = compute_novelty(positive_mols, ref_mols) else: novelty = 1. mo_metrics = {'success': success, 'novelty': novelty, 'succ_diversity': succ_diversity, } picked_smis = [(raw_rewards[i], picked_mols[i].score, smis[i]) for i in range(len(raw_rewards))] print(mo_metrics) return (picked_mols, scores_dict, picked_smis), batch_metrics, mo_metrics def log_overall_metrics(args, dataset, batch_infos=None, MultiObjective_metrics=None): volume = dataset.compute_hypervolume() print("Hypervolume for {}: {}".format(args.logger.context, volume)) args.logger.add_scalar('Metric/hypervolume', volume, use_context=False) args.logger.add_object('scores', dataset.scores) args.logger.add_object('smis', dataset.smis) if batch_infos: args.logger.add_scalar( 'Metric/test_loss', batch_infos['Test_loss'], use_context=False) args.logger.add_object('collected_info', batch_infos) if MultiObjective_metrics: args.logger.add_scalars('Metric/MultiObjective', MultiObjective_metrics, use_context=False) def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args): set_random_seed(args.seed) args.logger.set_context('iter_0') bpath = "./data/blocks_105.json" dpath = "./data/docked_mols.h5" # Initialize oracle and dataset (for training surrogate function) oracle = Oracle(args) dataset = Dataset(args, bpath, oracle, args.device) dataset.load_h5(dpath, num_init_examples=args.num_init_examples) log_overall_metrics(args, dataset) args.n_objectives = len(args.objectives) # Initialize surrogate function proxy = get_proxy(args, bpath, oracle) proxy.update(dataset, 0, reset=False) for i in range(1, args.num_outer_loop_iters+1): print(f"====== Starting round {i} ======") args.logger.set_context('iter_{}'.format(i)) test_weights = np.random.dirichlet(args.alpha_vector, 5*(2**(args.n_objectives-2))).astype(np.float32) if args.criterion == 'TB': generator = TBGFlowNet(args, bpath) elif args.criterion == 'FM': generator = FMGFlowNet(args, bpath) elif args.criterion == 'Reinforce': generator = MOReinforce(args, bpath) else: raise 
ValueError('Not implemented!') rollout_worker, training_metrics = train_generative_model( args, generator, bpath, proxy, oracle, dataset, test_weights, i, do_save=args.save) # sample molecule batch from generator and update dataset with oracle scores for sampled batch batch, batch_infos, MultiObjective_metrics = sample_batch( args, generator, rollout_worker, oracle, proxy, compute_multi_objective_metric=True) dataset.add_samples(batch) log_overall_metrics(args, dataset, batch_infos, MultiObjective_metrics) args.logger.save(os.path.join(args.log_dir, 'logged_data.pkl.gz')) # update proxy with new data if i != args.num_outer_loop_iters: proxy.update(dataset, i, reset=True) if __name__ == '__main__': args = arg_parse()
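get_test_rays in the code above builds a fixed grid of preference vectors on the probability simplex with pymoo's Das-Dennis construction and keeps only the strictly interior rays, so every objective receives a nonzero weight. The same recipe in isolation, for three objectives:

import numpy as np
from pymoo.util.ref_dirs import get_reference_directions

n_objectives, n_partitions = 3, 6
rays = get_reference_directions("das-dennis", n_objectives,
                                n_partitions=n_partitions).astype(np.float32)
# Keep only strictly interior rays, i.e. every objective gets a nonzero weight.
rays = rays[np.all(rays > 0, axis=1)]
print(rays.shape)  # das-dennis with 6 partitions yields C(8, 2) = 28 points, 10 of them interior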
warnings.filterwarnings('ignore') def arg_parse(): parser = argparse.ArgumentParser() parser.add_argument("--device", type=str, default='cuda') parser.add_argument('--seed', type=int, default=42, help='seed') parser.add_argument("--run", default=0, help="run", type=int) parser.add_argument('--save', action='store_true', default=False, help='Save model.') parser.add_argument('--debug',action='store_true', default=False, help='debug mode, no multi thread') parser.add_argument("--enable_tensorboard", action='store_true', default=False) parser.add_argument("--log_dir", default='runs/mobo') parser.add_argument("--include_nblocks", default=False) parser.add_argument("--num_init_examples", default=200, type=int) parser.add_argument("--num_outer_loop_iters", default=8, type=int) parser.add_argument("--num_samples", default=100, type=int) parser.add_argument("--floatX", default='float32') parser.add_argument('--sample_iterations', type=int, default=1000, help='sample mols and compute metrics') parser.add_argument("--log_weight_score", action='store_true', default=False) # objectives parser.add_argument("--objectives", type=str, default='gsk3b,jnk3,qed,sa') parser.add_argument("--acq_fn", default='UCB', type=str) parser.add_argument("--beta", default=0.1, type=float) parser.add_argument("--scalar", default='WeightedSum', type=str) parser.add_argument("--alpha", default=1., type=float, help='dirichlet distribution') parser.add_argument("--alpha_vector", default='1,1,1,1', type=str) # Proxy parser.add_argument("--proxy_normalize", action='store_true', default=False, help='normalize Y') parser.add_argument("--proxy_num_iterations", default=10000, type=int) parser.add_argument("--proxy_learning_rate", default=2.5e-4, help="Learning rate", type=float) parser.add_argument("--proxy_mbsize", default=64, help="Minibatch size", type=int) parser.add_argument("--proxy_early_stop_tol", default=10, type=int) parser.add_argument("--proxy_repr_type", default='atom_graph') parser.add_argument("--proxy_model_version", default='v2') parser.add_argument("--proxy_num_conv_steps", default=12, type=int) parser.add_argument("--proxy_nemb", default=64, help="#hidden", type=int) parser.add_argument("--proxy_weight_decay", default=1e-6, help="Weight Decay in Proxy", type=float) parser.add_argument("--proxy_uncertainty", default="evidential", type=str) # deep ensemble and GP parser.add_argument("--proxy_dropout", default=0.1, help="MC Dropout in Proxy", type=float) parser.add_argument("--proxy_num_dropout_samples", default=5, type=int) parser.add_argument("--evidential_lam", default=0.1, type=float) parser.add_argument( "--fp_radius", type=int, default=2, help="Morgan fingerprint radius." ) parser.add_argument( "--fp_nbits", type=int, default=1024, help="Morgan fingerprint nBits." 
) # GFlowNet parser.add_argument("--min_blocks", default=2, type=int) parser.add_argument("--max_blocks", default=8, type=int) parser.add_argument("--num_iterations", default=5000, type=int) parser.add_argument("--criterion", default="FM", type=str) parser.add_argument("--learning_rate", default=5e-4, help="Learning rate", type=float) parser.add_argument("--Z_learning_rate", default=5e-3, help="Learning rate", type=float) parser.add_argument("--clip_grad", default=0, type=float) parser.add_argument("--trajectories_mbsize", default=8, type=int) parser.add_argument("--offline_mbsize", default=8, type=int) parser.add_argument("--hindsight_prob", default=0.2, type=float) parser.add_argument("--hindsight_buffer_mbsize", default=8, type=int) parser.add_argument("--hindsight_trajectories_mbsize", default=8, type=int) parser.add_argument("--reward_min", default=1e-2, type=float) parser.add_argument("--reward_norm", default=1, type=float) parser.add_argument("--reward_exp", default=8, type=float) parser.add_argument("--reward_exp_ramping", default=0, type=float) parser.add_argument("--logit_clipping", default=0., type=float) # Hyperparameters for TB parser.add_argument("--partition_init", default=1, type=float) # Hyperparameters for FM parser.add_argument("--log_reg_c", default=(0.1/8)**4, type=float) parser.add_argument("--balanced_loss", default=True) parser.add_argument("--leaf_coef", default=10, type=float) # Architecture parser.add_argument("--repr_type", default='block_graph') parser.add_argument("--model_version", default='v4') parser.add_argument("--num_conv_steps", default=10, type=int) parser.add_argument("--nemb", default=256, help="#hidden", type=int) parser.add_argument("--weight_decay", default=0, type=float) parser.add_argument("--random_action_prob", default=0.05, type=float) parser.add_argument("--bootstrap_tau", default=0, type=float) parser.add_argument("--condition_type", type=str, default='HN') parser.add_argument("--ray_hidden_dim", default=100, type=int) return parser.parse_args() class BoRolloutWorker(RolloutWorker): def __init__(self, args, bpath, proxy, device): super(BoRolloutWorker, self).__init__(args, bpath, proxy, device) self.hindsight_prob = args.hindsight_prob self.hindsight_mols = defaultdict(list) self.hindsight_smiles = defaultdict(list) self.replay_threshold = 0.9 def _get(self, i, dset, weights=None): # Sample trajectories by walking backwards from the molecules in our dataset # Handle possible multithreading issues when independent threads # add/substract from dset: m = dset[i] if not isinstance(m, BlockMoleculeDataExtended): m = m[-1] r, raw_r = self._get_reward(m, weights) done = 1 samples = [] # a sample is a tuple (parents(s), parent actions, reward(s), s, done) # an action is (blockidx, stemidx) or (-1, x) for 'stop' # so we start with the stop action, unless the molecule is already # a "terminal" node (if it has no stems, no actions). 
if len(m.stems) and len(m.blocks) < self.max_blocks: samples.append(((m,), ((-1, 0),), weights, weights, r, m, done)) r = done = 0 while len(m.blocks): # and go backwards if self.ignore_parents: parents = self.mdp.parents(m) parent, action = parents[self.train_rng.randint(len(parents))] samples.append(((parent,), (action,), weights, weights, r, m, done)) r = done = 0 m = parent else: parents, actions = zip(*self.mdp.parents(m)) samples.append((parents, actions, weights.repeat(len(parents), 1), weights, r, m, done)) r = done = 0 m = parents[self.train_rng.randint(len(parents))] return samples[::-1] def _add_mol_to_replay(self, m): for i, weights in enumerate(self.test_weights): r, raw_r = self._get_reward(m, weights) if len(self.hindsight_mols[i]) < self.max_hindsight_mols or raw_r[0] > self.hindsight_mols[i][0][0]: if m.smiles not in self.hindsight_smiles[i]: self.hindsight_mols[i].append((raw_r[0].item(), m.smiles, m)) self.hindsight_smiles[i].append(m.smiles) if len(self.hindsight_mols[i]) > self.max_hindsight_mols: self.hindsight_mols[i] = sorted(self.hindsight_mols[i], key=lambda x:(x[0]))[ max(int(0.05 * self.max_hindsight_mols), 1):] self.hindsight_smiles[i] = [x[1] for x in self.hindsight_mols[i]] def _add_mol_to_online(self, r, m, inflow): if self.replay_mode == 'online': r = r + self.train_rng.normal() * 0.01 if len(self.online_mols) < self.max_online_mols or r > self.online_mols[0][0]: self.online_mols.append((r, m)) if len(self.online_mols) > self.max_online_mols: self.online_mols = sorted(self.online_mols)[ max(int(0.05 * self.max_online_mols), 1):] elif self.replay_mode == 'prioritized': self.online_mols.append((abs(inflow - np.log(r)), m)) if len(self.online_mols) > self.max_online_mols * 1.1: self.online_mols = self.online_mols[-self.max_online_mols:] def _get_reward(self, m, weights=None): rdmol = m.mol if rdmol is None: return self.reward_min # get reward from proxy raw_reward, score = self.proxy(m, weights) raw_reward = raw_reward.clip(self.reward_min) reward = self.l2r(raw_reward) return reward, (raw_reward, score) def execute_train_episode_batch(self, generator, dataset=None, Y_bounds=None, use_rand_policy=True): if self.train_rng.uniform() < self.hindsight_prob: idx = self.train_rng.randint(self.test_weights.shape[0]) weights = self.test_weights[idx].unsqueeze(0) samples = sum((self.rollout(generator, use_rand_policy, weights) for i in range(self.args.hindsight_trajectories_mbsize)), []) if self.args.hindsight_buffer_mbsize > 0: buffer = deepcopy(self.hindsight_mols[idx]) reward = np.array([x[0] for x in buffer]) prob = reward / sum(reward) eidx = np.random.choice(list(range(len(buffer))), self.args.hindsight_buffer_mbsize, replace=False, p=prob) offline_samples = sum((self._get(i, buffer, weights) for i in eidx), []) samples += offline_samples else: weights = Dirichlet(torch.tensor(self.args.alpha_vector)*self.args.alpha).sample_n(1).to(self.args.device) #* sample weights per batch, seem better samples = sum((self.rollout(generator, use_rand_policy, weights, replay=True) for i in range(self.args.trajectories_mbsize)), []) # offline sampling from dataset if self.args.offline_mbsize > 0 and dataset is not None: # use the oracle reward scores = torch.tensor(pd.DataFrame.from_dict(dataset.scores).values, dtype=torch.float32).to(args.device) if Y_bounds is not None: scores = normalize(scores, Y_bounds) reward = torch.matmul(scores, weights.reshape(-1, 1)) prob = (reward / sum(reward)).squeeze(1).cpu().numpy() eidx = np.random.choice(list(range(len(dataset.all_mols))), 
self.args.offline_mbsize, replace=False, p=prob) offline_samples = sum((self._get(i, dataset.all_mols, weights) for i in eidx), []) samples += offline_samples return zip(*samples) def initialize_hindsight_mols(self, dataset): for m in dataset.all_mols: for i, weights in enumerate(self.test_weights): r, raw_r = self._get_reward(m, weights) self.hindsight_mols[i].append((raw_r[0].item(), m.smiles, m)) for i, weights in enumerate(self.test_weights): self.hindsight_mols[i] = sorted(self.hindsight_mols[i], key=lambda x:(x[0])) self.hindsight_smiles[i] = [x[1] for x in self.hindsight_mols[i]] def train_generative_model(args, generator, bpath, proxy, oracle, dataset, test_weights, round_idx, do_save=False): print("Training generator...") os.makedirs(os.path.join(args.log_dir, f'round_{round_idx}'), exist_ok=True) device = args.device rollout_worker = BoRolloutWorker(args, bpath, proxy, device) rollout_worker.test_weights = torch.tensor(test_weights).to(device) rollout_worker.initialize_hindsight_mols(dataset) Y_bounds = torch.stack([proxy.partitioning.Y.min(dim=-2).values, proxy.partitioning.Y.max(dim=-2).values]) def save_stuff(round_idx, iter): torch.save(generator.state_dict(), os.path.join( args.log_dir, 'round_{}/{}_generator_checkpoint.pth'.format(round_idx, iter))) pickle.dump(rollout_worker.sampled_mols, gzip.open(f'{args.log_dir}/sampled_mols.pkl.gz', 'wb')) multi_thread = not args.debug if multi_thread: sampler = rollout_worker.start_samplers(generator, 8, dataset) def stop_everything(): print('joining') rollout_worker.stop_samplers_and_join() last_losses = [] train_losses = [] test_losses = [] test_infos = [] train_infos = [] time_last_check = time.time() for i in range(args.num_iterations + 1): if multi_thread: r = sampler() for thread in rollout_worker.sampler_threads: if thread.failed: stop_everything() pdb.post_mortem(thread.exception.__traceback__) return p, pb, a, pw, w, r, s, d, mols = r else: p, pb, a, pw, w, r, s, d, mols = rollout_worker.sample2batch( rollout_worker.execute_train_episode_batch(generator, dataset, Y_bounds, use_rand_policy=True)) loss = generator.train_step(p, pb, a, pw, w, r, s, d, mols, i) last_losses.append(loss) if not i % 100: train_loss = [np.round(np.mean(i), 3) for i in zip(*last_losses)] train_losses.append(train_loss) args.logger.add_scalar( 'Loss/round{}/train'.format(round_idx), train_loss[0], use_context=False) print('Iter {}: Loss {}, Time {}'.format( i, train_loss, round(time.time() - time_last_check, 3))) time_last_check = time.time() last_losses = [] if not i % args.sample_iterations and i != 0: volume, volume_oracle, reward_weight, reward_mean, test_loss, diversity = sample_batch( args, generator, rollout_worker, oracle, proxy, Y_bounds, compute_multi_objective_metric=False) args.logger.add_scalar( 'round{}/Top-100-sampled/volumes'.format(round_idx), volume, use_context=False) args.logger.add_scalar( 'round{}/Top-100-sampled/volumes_oracle'.format(round_idx), volume_oracle, use_context=False) args.logger.add_scalars( 'round{}/Top-100-sampled/reward_weight'.format(round_idx), reward_weight, use_context=False) args.logger.add_scalar( 'round{}/Top-100-sampled/reward_mean'.format(round_idx), reward_mean, use_context=False) # reward_mean is a dict, the keys are test_weights args.logger.add_scalar( 'round{}/Top-100-sampled/test_loss'.format(round_idx), test_loss, use_context=False) args.logger.add_scalar( 'round{}/Top-100-sampled/dists'.format(round_idx), diversity, use_context=False) if do_save: save_stuff(round_idx, i) stop_everything() if 
do_save: save_stuff(round_idx, i) checkpoint_path = os.path.join(args.log_dir, f'round_{round_idx}/{i}_generator_checkpoint.pth') generator.load_state_dict(torch.load(checkpoint_path)) return rollout_worker, {'train_losses': train_losses, 'test_losses': test_losses, 'test_infos': test_infos, 'train_infos': train_infos} def sample_batch(args, generator, rollout_worker, oracle=None, proxy=None, ref_mols=None, Y_bounds=None, compute_multi_objective_metric=False): score_succ = {'gsk3b': 0.5, 'jnk3': 0.5, 'drd2': 0.5, 'chemprop_sars': 0.5, 'chemprop_hiv': 0.5, "seh": 0.5, 'qed': 0.6, 'sa': 0.67} if Y_bounds is None: Y_bounds = torch.stack([proxy.partitioning.Y.min( dim=-2).values, proxy.partitioning.Y.max(dim=-2).values]) time_start = time.time() print(f"Sampling molecules...") raw_rewards = [] raw_rewards_weight = {} means = [] picked_mols = [] smis = [] for i, weights in enumerate(rollout_worker.test_weights): sampled_mols = [] sampled_raw_rewards = [] sampled_means = [] sampled_smis = [] while len(sampled_mols) < args.num_samples: rollout_worker.rollout(generator, use_rand_policy=False, weights=torch.tensor(weights).unsqueeze(0).to(args.device)) (raw_r, _, m, trajectory_stats, inflow) = rollout_worker.sampled_mols[-1] sampled_mols.append(m) sampled_raw_rewards.append(raw_r[0].item()) sampled_means.append(raw_r[1]) sampled_smis.append(m.smiles) idx_pick = np.argsort(sampled_raw_rewards)[::-1][:int(args.num_samples/len(rollout_worker.test_weights))] picked_mols.extend(np.array(sampled_mols)[idx_pick].tolist()) means.extend(np.array(sampled_means)[idx_pick].tolist()) smis.extend(np.array(sampled_smis)[idx_pick].tolist()) raw_rewards.extend(np.array(sampled_raw_rewards)[idx_pick].tolist()) raw_rewards_weight[str(weights.cpu())] = np.array(sampled_raw_rewards)[idx_pick].mean() raw_rewards_mean = np.mean(list(raw_rewards_weight.values())) assert len(picked_mols) == args.num_samples top_means = torch.tensor(means) scores_dict = oracle.batch_get_scores(picked_mols) scores = torch.tensor(pd.DataFrame.from_dict(scores_dict).values) test_loss = F.mse_loss(top_means, scores) hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives))) volume = hypervolume.compute(top_means) volume_oracle = hypervolume.compute(scores) diversity = compute_diversity(picked_mols) batch_metrics = {'Hypervolume_reward': volume, 'Hypervolume_oracle': volume_oracle, 'Reward_mean': raw_rewards_mean, 'scores_max': pd.DataFrame.from_dict(scores_dict).max().to_dict(), 'scores_mean': pd.DataFrame.from_dict(scores_dict).mean().to_dict(), 'Test_loss': test_loss, 'Diversity': diversity} print(batch_metrics) print('Time: {}'.format(time.time()-time_start)) if not compute_multi_objective_metric: return volume, volume_oracle, raw_rewards_weight, raw_rewards_mean, test_loss, diversity else: for i in range(len(picked_mols)): picked_mols[i].score = scores_dict[i] # success/diversity/novelty is computed among the top mols. success, positive_mols = compute_success( picked_mols, scores_dict, args.objectives, score_succ) succ_diversity = compute_diversity(positive_mols) if ref_mols: novelty = compute_novelty(positive_mols, ref_mols) else: novelty = 1. 
mo_metrics = {'success': success, 'novelty': novelty, 'succ_diversity': succ_diversity, } picked_smis = [(raw_rewards[i], picked_mols[i].score, smis[i]) for i in range(len(raw_rewards))] print(mo_metrics) return (picked_mols, scores_dict, picked_smis), batch_metrics, mo_metrics def log_overall_metrics(args, dataset, batch_infos=None, MultiObjective_metrics=None): volume = dataset.compute_hypervolume() print("Hypervolume for {}: {}".format(args.logger.context, volume)) args.logger.add_scalar('Metric/hypervolume', volume, use_context=False) args.logger.add_object('scores', dataset.scores) args.logger.add_object('smis', dataset.smis) if batch_infos: args.logger.add_scalar( 'Metric/test_loss', batch_infos['Test_loss'], use_context=False) args.logger.add_object('collected_info', batch_infos) if MultiObjective_metrics: args.logger.add_scalars('Metric/MultiObjective', MultiObjective_metrics, use_context=False) def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args): set_random_seed(args.seed) args.logger.set_context('iter_0') bpath = "./data/blocks_105.json" dpath = "./data/docked_mols.h5" # Initialize oracle and dataset (for training surrogate function) oracle = Oracle(args) dataset = Dataset(args, bpath, oracle, args.device) dataset.load_h5(dpath, num_init_examples=args.num_init_examples) log_overall_metrics(args, dataset) args.n_objectives = len(args.objectives) # Initialize surrogate function proxy = get_proxy(args, bpath, oracle) proxy.update(dataset, 0, reset=False) for i in range(1, args.num_outer_loop_iters+1): print(f"====== Starting round {i} ======") args.logger.set_context('iter_{}'.format(i)) test_weights = np.random.dirichlet(args.alpha_vector, 5*(2**(args.n_objectives-2))).astype(np.float32) if args.criterion == 'TB': generator = TBGFlowNet(args, bpath) elif args.criterion == 'FM': generator = FMGFlowNet(args, bpath) elif args.criterion == 'Reinforce': generator = MOReinforce(args, bpath) else: raise ValueError('Not implemented!') rollout_worker, training_metrics = train_generative_model( args, generator, bpath, proxy, oracle, dataset, test_weights, i, do_save=args.save) # sample molecule batch from generator and update dataset with oracle scores for sampled batch batch, batch_infos, MultiObjective_metrics = sample_batch( args, generator, rollout_worker, oracle, proxy, compute_multi_objective_metric=True) dataset.add_samples(batch) log_overall_metrics(args, dataset, batch_infos, MultiObjective_metrics) args.logger.save(os.path.join(args.log_dir, 'logged_data.pkl.gz')) # update proxy with new data if i != args.num_outer_loop_iters: proxy.update(dataset, i, reset=True) if __name__ == '__main__': args = arg_parse()
args.logger = get_logger(args)
14
2023-10-24 14:10:35+00:00
24k
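The molecular design record above trains a preference-conditioned GFlowNet whose reward is a weighted sum of several objective scores (gsk3b, jnk3, qed, sa): a preference vector is drawn from a Dirichlet distribution controlled by --alpha and --alpha_vector, and the objective scores are normalized against observed bounds before scalarization (see execute_train_episode_batch and sample_batch). The lines below are a minimal sketch of just that scalarization step, not the script's own helper: the function name is illustrative, and the min-max bounds are taken from the batch itself rather than from the proxy's Y_bounds.

import torch
from torch.distributions.dirichlet import Dirichlet

def scalarize_rewards(scores, alpha_vector, alpha=1.0):
    # scores: (N, K) raw objective values for N molecules and K objectives.
    concentration = torch.tensor(alpha_vector, dtype=torch.float32) * alpha
    weights = Dirichlet(concentration).sample()        # one preference point on the K-simplex
    y_min = scores.min(dim=0, keepdim=True).values     # illustrative stand-in for Y_bounds
    y_max = scores.max(dim=0, keepdim=True).values
    normed = (scores - y_min) / (y_max - y_min).clamp_min(1e-8)
    reward = normed @ weights.reshape(-1, 1)           # (N, 1) scalarized reward
    return reward, weights

scores = torch.rand(5, 4)                              # 5 molecules, 4 objectives
reward, weights = scalarize_rewards(scores, alpha_vector=[1.0, 1.0, 1.0, 1.0])
print(weights, reward.squeeze(1))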
caglarkucuk/earthformer-satellite-to-radar
ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer_unet_dec.py
[ { "identifier": "Upsample3DLayer", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class Upsample3DLayer(nn.Module):\n \"\"\"Upsampling based on nn.UpSampling and Conv3x3.\n\n If the temporal dimension remains the same:\n x --> interpolation-2d (nearest) --> conv3x3(dim, out_dim)\n Else:\n x --> interpolation-3d (nearest) --> conv3x3x3(dim, out_dim)\n\n \"\"\"\n def __init__(self,\n dim,\n out_dim,\n target_size,\n temporal_upsample=False,\n kernel_size=3,\n layout='THWC',\n conv_init_mode=\"0\",\n ):\n \"\"\"\n\n Parameters\n ----------\n dim\n out_dim\n target_size\n Size of the output tensor. Will be a tuple/list that contains T_new, H_new, W_new\n temporal_upsample\n Whether the temporal axis will go through upsampling.\n kernel_size\n The kernel size of the Conv2D layer\n layout\n The layout of the inputs\n \"\"\"\n super(Upsample3DLayer, self).__init__()\n self.conv_init_mode = conv_init_mode\n self.target_size = target_size\n self.out_dim = out_dim\n self.temporal_upsample = temporal_upsample\n if temporal_upsample:\n self.up = nn.Upsample(size=target_size, mode='nearest') # 3D upsampling\n else:\n self.up = nn.Upsample(size=(target_size[1], target_size[2]), mode='nearest') # 2D upsampling\n self.conv = nn.Conv2d(in_channels=dim, out_channels=out_dim, kernel_size=(kernel_size, kernel_size),\n padding=(kernel_size // 2, kernel_size // 2))\n assert layout in ['THWC', 'CTHW']\n self.layout = layout\n\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode)\n\n def forward(self, x):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C) or (B, C, T, H, W)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_out, C_out) or (B, C, T, H_out, W_out)\n \"\"\"\n if self.layout == 'THWC':\n B, T, H, W, C = x.shape\n if self.temporal_upsample:\n x = x.permute(0, 4, 1, 2, 3) # (B, C, T, H, W)\n return self.conv(self.up(x)).permute(0, 2, 3, 4, 1)\n else:\n assert self.target_size[0] == T\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2) # (B * T, C, H, W)\n x = self.up(x)\n return self.conv(x).permute(0, 2, 3, 1).reshape((B,) + self.target_size + (self.out_dim,))\n elif self.layout == 'CTHW':\n B, C, T, H, W = x.shape\n if self.temporal_upsample:\n return self.conv(self.up(x))\n else:\n assert self.output_size[0] == T\n x = x.permute(0, 2, 1, 3, 4) # (B, T, C, H, W)\n x = x.reshape(B * T, C, H, W)\n return self.conv(self.up(x)).reshape(B, self.target_size[0], self.out_dim, self.target_size[1],\n self.target_size[2]).permute(0, 2, 1, 3, 4)" }, { "identifier": "PatchMerging3D", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class PatchMerging3D(nn.Module):\n \"\"\" Patch Merging Layer\"\"\"\n def __init__(self,\n dim,\n out_dim=None,\n downsample=(1, 2, 2),\n norm_layer='layer_norm',\n padding_type='nearest',\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n \"\"\"\n\n Parameters\n ----------\n dim\n Number of input channels.\n downsample\n downsample factor\n norm_layer\n The normalization layer\n \"\"\"\n super().__init__()\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n self.dim = dim\n if out_dim is None:\n out_dim = max(downsample) * dim\n self.out_dim = out_dim\n self.downsample = downsample\n self.padding_type = padding_type\n self.reduction = nn.Linear(downsample[0] * downsample[1] * downsample[2] * dim,\n out_dim, bias=False)\n self.norm = get_norm_layer(norm_layer, 
in_channels=downsample[0] * downsample[1] * downsample[2] * dim)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n def get_out_shape(self, data_shape):\n T, H, W, C_in = data_shape\n pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]\n pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]\n pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]\n return (T + pad_t) // self.downsample[0], (H + pad_h) // self.downsample[1], (W + pad_w) // self.downsample[2],\\\n self.out_dim\n\n def forward(self, x):\n \"\"\"\n\n Parameters\n ----------\n x\n Input feature, tensor size (B, T, H, W, C).\n\n Returns\n -------\n out\n Shape (B, T // downsample[0], H // downsample[1], W // downsample[2], out_dim)\n \"\"\"\n B, T, H, W, C = x.shape\n\n # padding\n pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]\n pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]\n pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]\n if pad_h or pad_h or pad_w:\n T += pad_t\n H += pad_h\n W += pad_w\n x = _generalize_padding(x, pad_t, pad_w, pad_h, padding_type=self.padding_type)\n\n x = x.reshape((B,\n T // self.downsample[0], self.downsample[0],\n H // self.downsample[1], self.downsample[1],\n W // self.downsample[2], self.downsample[2], C)) \\\n .permute(0, 1, 3, 5, 2, 4, 6, 7) \\\n .reshape(B, T // self.downsample[0], H // self.downsample[1], W // self.downsample[2],\n self.downsample[0] * self.downsample[1] * self.downsample[2] * C)\n x = self.norm(x)\n x = self.reduction(x)\n\n return x" }, { "identifier": "PosEmbed", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class PosEmbed(nn.Module):\n\n def __init__(self, embed_dim, maxT, maxH, maxW, typ='t+h+w'):\n r\"\"\"\n Parameters\n ----------\n embed_dim\n maxT\n maxH\n maxW\n typ\n The type of the positional embedding.\n - t+h+w:\n Embed the spatial position to embeddings\n - t+hw:\n Embed the spatial position to embeddings\n \"\"\"\n super(PosEmbed, self).__init__()\n self.typ = typ\n\n assert self.typ in ['t+h+w', 't+hw']\n self.maxT = maxT\n self.maxH = maxH\n self.maxW = maxW\n self.embed_dim = embed_dim\n # spatiotemporal learned positional embedding\n if self.typ == 't+h+w':\n self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)\n self.H_embed = nn.Embedding(num_embeddings=maxH, embedding_dim=embed_dim)\n self.W_embed = nn.Embedding(num_embeddings=maxW, embedding_dim=embed_dim)\n\n # nn.init.trunc_normal_(self.T_embed.weight, std=0.02)\n # nn.init.trunc_normal_(self.H_embed.weight, std=0.02)\n # nn.init.trunc_normal_(self.W_embed.weight, std=0.02)\n elif self.typ == 't+hw':\n self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)\n self.HW_embed = nn.Embedding(num_embeddings=maxH * maxW, embedding_dim=embed_dim)\n # nn.init.trunc_normal_(self.T_embed.weight, std=0.02)\n # nn.init.trunc_normal_(self.HW_embed.weight, std=0.02)\n else:\n raise NotImplementedError\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m, embed_mode=\"0\")\n\n def forward(self, x):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Return the x + positional embeddings\n \"\"\"\n _, T, H, W, _ = x.shape\n t_idx = torch.arange(T, device=x.device) 
# (T, C)\n h_idx = torch.arange(H, device=x.device) # (H, C)\n w_idx = torch.arange(W, device=x.device) # (W, C)\n if self.typ == 't+h+w':\n return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim)\\\n + self.H_embed(h_idx).reshape(1, H, 1, self.embed_dim)\\\n + self.W_embed(w_idx).reshape(1, 1, W, self.embed_dim)\n elif self.typ == 't+hw':\n spatial_idx = h_idx.unsqueeze(-1) * self.maxW + w_idx\n return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim) + self.HW_embed(spatial_idx)\n else:\n raise NotImplementedError" }, { "identifier": "InitialEncoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class InitialEncoder(nn.Module):\n def __init__(self,\n dim,\n out_dim,\n downsample_scale: Union[int, Sequence[int]],\n num_conv_layers=2,\n activation='leaky',\n padding_type='nearest',\n conv_init_mode=\"0\",\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(InitialEncoder, self).__init__()\n\n self.num_conv_layers = num_conv_layers\n self.conv_init_mode = conv_init_mode\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n conv_block = []\n for i in range(num_conv_layers):\n if i == 0:\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1),\n in_channels=dim, out_channels=out_dim))\n conv_block.append(nn.GroupNorm(16, out_dim))\n conv_block.append(get_activation(activation))\n else:\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1),\n in_channels=out_dim, out_channels=out_dim))\n conv_block.append(nn.GroupNorm(16, out_dim))\n conv_block.append(get_activation(activation))\n\n self.conv_block = nn.Sequential(*conv_block)\n if isinstance(downsample_scale, int):\n patch_merge_downsample = (1, downsample_scale, downsample_scale)\n elif len(downsample_scale) == 2:\n patch_merge_downsample = (1, *downsample_scale)\n elif len(downsample_scale) == 3:\n patch_merge_downsample = tuple(downsample_scale)\n else:\n raise NotImplementedError(f\"downsample_scale {downsample_scale} format not supported!\")\n self.patch_merge = PatchMerging3D(\n dim=out_dim, out_dim=out_dim,\n padding_type=padding_type,\n downsample=patch_merge_downsample,\n linear_init_mode=linear_init_mode,\n norm_init_mode=norm_init_mode)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n def forward(self, x):\n \"\"\"\n\n x --> [K x Conv2D] --> PatchMerge\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_new, C_out)\n \"\"\"\n B, T, H, W, C = x.shape\n if self.num_conv_layers > 0:\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2)\n x = self.conv_block(x).permute(0, 2, 3, 1) # (B * T, H, W, C_new)\n x = self.patch_merge(x.reshape(B, T, H, W, -1))\n else:\n x = self.patch_merge(x)\n return x" }, { "identifier": "FinalDecoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class FinalDecoder(nn.Module):\n\n def __init__(self,\n target_thw,\n dim,\n num_conv_layers=2,\n activation='leaky',\n conv_init_mode=\"0\",\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(FinalDecoder, self).__init__()\n self.target_thw = target_thw\n self.dim = dim\n self.num_conv_layers = num_conv_layers\n self.conv_init_mode = conv_init_mode\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n conv_block = []\n for i in 
range(num_conv_layers):\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1), in_channels=dim, out_channels=dim))\n conv_block.append(nn.GroupNorm(16, dim))\n conv_block.append(get_activation(activation))\n self.conv_block = nn.Sequential(*conv_block)\n self.upsample = Upsample3DLayer(\n dim=dim, out_dim=dim,\n target_size=target_thw, kernel_size=3,\n conv_init_mode=conv_init_mode)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n def forward(self, x):\n \"\"\"\n\n x --> Upsample --> [K x Conv2D]\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_new, C)\n \"\"\"\n x = self.upsample(x)\n if self.num_conv_layers > 0:\n B, T, H, W, C = x.shape\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2)\n x = self.conv_block(x).permute(0, 2, 3, 1).reshape(B, T, H, W, -1)\n return x" }, { "identifier": "InitialStackPatchMergingEncoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class InitialStackPatchMergingEncoder(nn.Module):\n\n def __init__(self,\n num_merge: int,\n in_dim,\n out_dim_list,\n downsample_scale_list,\n num_conv_per_merge_list=None,\n activation='leaky',\n padding_type='nearest',\n conv_init_mode=\"0\",\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(InitialStackPatchMergingEncoder, self).__init__()\n\n self.conv_init_mode = conv_init_mode\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n self.num_merge = num_merge\n self.in_dim = in_dim\n self.out_dim_list = out_dim_list[:num_merge]\n self.downsample_scale_list = downsample_scale_list[:num_merge]\n self.num_conv_per_merge_list = num_conv_per_merge_list\n self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list]\n\n self.conv_block_list = nn.ModuleList()\n self.patch_merge_list = nn.ModuleList()\n for i in range(num_merge):\n if i == 0:\n in_dim = in_dim\n else:\n in_dim = self.out_dim_list[i - 1]\n out_dim = self.out_dim_list[i]\n downsample_scale = self.downsample_scale_list[i]\n\n conv_block = []\n for j in range(self.num_conv_per_merge_list[i]):\n if j == 0:\n conv_in_dim = in_dim\n else:\n conv_in_dim = out_dim\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1),\n in_channels=conv_in_dim, out_channels=out_dim))\n conv_block.append(nn.GroupNorm(self.num_group_list[i], out_dim))\n conv_block.append(get_activation(activation))\n\n conv_block = nn.Sequential(*conv_block)\n self.conv_block_list.append(conv_block)\n patch_merge = PatchMerging3D(\n dim=out_dim, out_dim=out_dim,\n padding_type=padding_type,\n downsample=(1, downsample_scale, downsample_scale),\n linear_init_mode=linear_init_mode,\n norm_init_mode=norm_init_mode)\n self.patch_merge_list.append(patch_merge)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n def get_out_shape_list(self, input_shape):\n \"\"\"\n T, H, W, C\n \"\"\"\n out_shape_list = []\n for patch_merge in self.patch_merge_list:\n input_shape = patch_merge.get_out_shape(input_shape)\n out_shape_list.append(input_shape)\n return out_shape_list\n\n def forward(self, x):\n \"\"\"\n\n x --> [K x Conv2D] --> PatchMerge --> ... 
--> [K x Conv2D] --> PatchMerge\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_new, C_out)\n \"\"\"\n for i, (conv_block, patch_merge) in \\\n enumerate(zip(self.conv_block_list, self.patch_merge_list)):\n B, T, H, W, C = x.shape\n if self.num_conv_per_merge_list[i] > 0:\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2)\n x = conv_block(x).permute(0, 2, 3, 1).reshape(B, T, H, W, -1)\n x = patch_merge(x)\n return x" }, { "identifier": "FinalStackUpsamplingDecoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class FinalStackUpsamplingDecoder(nn.Module):\n\n def __init__(self,\n target_shape_list,\n in_dim,\n num_conv_per_up_list=None,\n activation='leaky',\n conv_init_mode=\"0\",\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n \"\"\"\n Parameters\n ----------\n target_shape_list:\n list of (T, H ,W ,C)\n \"\"\"\n super(FinalStackUpsamplingDecoder, self).__init__()\n self.conv_init_mode = conv_init_mode\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n self.target_shape_list = target_shape_list\n self.out_dim_list = [target_shape[-1] for target_shape in self.target_shape_list]\n self.num_upsample = len(target_shape_list)\n self.in_dim = in_dim\n self.num_conv_per_up_list = num_conv_per_up_list\n self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list]\n\n self.conv_block_list = nn.ModuleList()\n self.upsample_list = nn.ModuleList()\n for i in range(self.num_upsample):\n if i == 0:\n in_dim = in_dim\n else:\n in_dim = self.out_dim_list[i - 1]\n out_dim = self.out_dim_list[i]\n\n upsample = Upsample3DLayer(\n dim=in_dim, out_dim=in_dim,\n target_size=target_shape_list[i][:-1], kernel_size=3,\n conv_init_mode=conv_init_mode)\n self.upsample_list.append(upsample)\n conv_block = []\n for j in range(num_conv_per_up_list[i]):\n if j == 0:\n conv_in_dim = in_dim\n else:\n conv_in_dim = out_dim\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1),\n in_channels=conv_in_dim, out_channels=out_dim))\n conv_block.append(nn.GroupNorm(self.num_group_list[i], out_dim))\n conv_block.append(get_activation(activation))\n conv_block = nn.Sequential(*conv_block)\n self.conv_block_list.append(conv_block)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n @staticmethod\n def get_init_params(enc_input_shape, enc_out_shape_list, large_channel=False):\n dec_target_shape_list = list(enc_out_shape_list[:-1])[::-1] + [tuple(enc_input_shape), ]\n if large_channel:\n dec_target_shape_list_large_channel = []\n for i, enc_out_shape in enumerate(enc_out_shape_list[::-1]):\n dec_target_shape_large_channel = list(dec_target_shape_list[i])\n dec_target_shape_large_channel[-1] = enc_out_shape[-1]\n dec_target_shape_list_large_channel.append(tuple(dec_target_shape_large_channel))\n dec_target_shape_list = dec_target_shape_list_large_channel\n dec_in_dim = enc_out_shape_list[-1][-1]\n return dec_target_shape_list, dec_in_dim\n\n def forward(self, x):\n \"\"\"\n\n x --> Upsample --> [K x Conv2D] --> ... 
--> Upsample --> [K x Conv2D]\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_new, C)\n \"\"\"\n for i, (conv_block, upsample) in \\\n enumerate(zip(self.conv_block_list, self.upsample_list)):\n x = upsample(x)\n if self.num_conv_per_up_list[i] > 0:\n B, T, H, W, C = x.shape\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2)\n x = conv_block(x).permute(0, 2, 3, 1).reshape(B, T, H, W, -1)\n return x" }, { "identifier": "StackCuboidSelfAttentionBlock", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class StackCuboidSelfAttentionBlock(nn.Module):\n \"\"\"\n\n - \"use_inter_ffn\" is True\n x --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out\n | ^ | ^\n | | | |\n |-------------| |-------------|\n - \"use_inter_ffn\" is False\n x --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out\n | ^ | ^ ^ | ^\n | | | | | | |\n |-------------| |------------| ----------| |-----------|\n If we have enabled global memory vectors, each attention will be a\n\n \"\"\"\n def __init__(self,\n dim,\n num_heads,\n block_cuboid_size=[(4, 4, 4), (4, 4, 4)],\n block_shift_size=[(0, 0, 0), (2, 2, 2)],\n block_strategy=[('d', 'd', 'd'),\n ('l', 'l', 'l')],\n padding_type='ignore',\n qkv_bias=False,\n qk_scale=None,\n attn_drop=0.0,\n proj_drop=0.0,\n ffn_drop=0.0,\n activation='leaky',\n gated_ffn=False,\n norm_layer='layer_norm',\n use_inter_ffn=False,\n use_global_vector=False,\n use_global_vector_ffn=True,\n use_global_self_attn=False,\n separate_global_qkv=False,\n global_dim_ratio=1,\n checkpoint_level=True,\n use_relative_pos=True,\n use_final_proj=True,\n # initialization\n attn_linear_init_mode=\"0\",\n ffn_linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(StackCuboidSelfAttentionBlock, self).__init__()\n # initialization\n self.attn_linear_init_mode = attn_linear_init_mode\n self.ffn_linear_init_mode = ffn_linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n assert len(block_cuboid_size[0]) > 0 and len(block_shift_size) > 0 and len(block_strategy) > 0,\\\n f'Format of the block cuboid size is not correct.' 
\\\n f' block_cuboid_size={block_cuboid_size}'\n assert len(block_cuboid_size) == len(block_shift_size) == len(block_strategy)\n self.num_attn = len(block_cuboid_size)\n self.checkpoint_level = checkpoint_level\n self.use_inter_ffn = use_inter_ffn\n # global vectors\n self.use_global_vector = use_global_vector\n self.use_global_vector_ffn = use_global_vector_ffn\n self.use_global_self_attn = use_global_self_attn\n self.global_dim_ratio = global_dim_ratio\n\n if self.use_inter_ffn:\n self.ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=dim,\n hidden_size=4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn,\n activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for _ in range(self.num_attn)])\n if self.use_global_vector_ffn and self.use_global_vector:\n self.global_ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=global_dim_ratio * dim,\n hidden_size=global_dim_ratio * 4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn,\n activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for _ in range(self.num_attn)])\n else:\n self.ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=dim, hidden_size=4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn, activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)])\n if self.use_global_vector_ffn and self.use_global_vector:\n self.global_ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=global_dim_ratio * dim,\n hidden_size=global_dim_ratio * 4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn, activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)])\n self.attn_l = nn.ModuleList(\n [CuboidSelfAttentionLayer(\n dim=dim, num_heads=num_heads,\n cuboid_size=ele_cuboid_size,\n shift_size=ele_shift_size,\n strategy=ele_strategy,\n padding_type=padding_type,\n qkv_bias=qkv_bias,\n qk_scale=qk_scale,\n attn_drop=attn_drop,\n proj_drop=proj_drop,\n norm_layer=norm_layer,\n use_global_vector=use_global_vector,\n use_global_self_attn=use_global_self_attn,\n separate_global_qkv=separate_global_qkv,\n global_dim_ratio=global_dim_ratio,\n checkpoint_level=checkpoint_level,\n use_relative_pos=use_relative_pos,\n use_final_proj=use_final_proj,\n attn_linear_init_mode=attn_linear_init_mode,\n ffn_linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for ele_cuboid_size, ele_shift_size, ele_strategy\n in zip(block_cuboid_size, block_shift_size, block_strategy)])\n\n def reset_parameters(self):\n for m in self.ffn_l:\n m.reset_parameters()\n if self.use_global_vector_ffn and self.use_global_vector:\n for m in self.global_ffn_l:\n m.reset_parameters()\n for m in self.attn_l:\n m.reset_parameters()\n\n def forward(self, x, global_vectors=None):\n if self.use_inter_ffn:\n if self.use_global_vector:\n for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)):\n if self.checkpoint_level >= 2 and self.training:\n x_out, global_vectors_out = checkpoint.checkpoint(attn, x, global_vectors)\n else:\n x_out, global_vectors_out = attn(x, global_vectors)\n x = x + x_out\n global_vectors = global_vectors + global_vectors_out\n\n if self.checkpoint_level >= 1 and self.training:\n x = 
checkpoint.checkpoint(ffn, x)\n if self.use_global_vector_ffn:\n global_vectors = checkpoint.checkpoint(self.global_ffn_l[idx], global_vectors)\n else:\n x = ffn(x)\n if self.use_global_vector_ffn:\n global_vectors = self.global_ffn_l[idx](global_vectors)\n return x, global_vectors\n else:\n for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)):\n if self.checkpoint_level >= 2 and self.training:\n x = x + checkpoint.checkpoint(attn, x)\n else:\n x = x + attn(x)\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(ffn, x)\n else:\n x = ffn(x)\n return x\n else:\n if self.use_global_vector:\n for idx, attn in enumerate(self.attn_l):\n if self.checkpoint_level >= 2 and self.training:\n x_out, global_vectors_out = checkpoint.checkpoint(attn, x, global_vectors)\n else:\n x_out, global_vectors_out = attn(x, global_vectors)\n x = x + x_out\n global_vectors = global_vectors + global_vectors_out\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(self.ffn_l[0], x)\n if self.use_global_vector_ffn:\n global_vectors = checkpoint.checkpoint(self.global_ffn_l[0], global_vectors)\n else:\n x = self.ffn_l[0](x)\n if self.use_global_vector_ffn:\n global_vectors = self.global_ffn_l[0](global_vectors)\n return x, global_vectors\n else:\n for idx, attn in enumerate(self.attn_l):\n if self.checkpoint_level >= 2 and self.training:\n out = checkpoint.checkpoint(attn, x)\n else:\n out = attn(x)\n x = x + out\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(self.ffn_l[0], x)\n else:\n x = self.ffn_l[0](x)\n return x" }, { "identifier": "StackCuboidCrossAttentionBlock", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class StackCuboidCrossAttentionBlock(nn.Module):\n \"\"\"A stack of cuboid cross attention layers.\n\n The advantage of cuboid attention is that we can combine cuboid attention building blocks with different\n hyper-parameters to mimic a broad range of space-time correlation patterns.\n\n - \"use_inter_ffn\" is True\n x, mem --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out\n | ^ | ^\n | | | |\n |-------------|----|-------------|\n - \"use_inter_ffn\" is False\n x, mem --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out, mem\n | ^ | ^ ^ | ^\n | | | | | | |\n |-------------|----|------------|-- ----------|--|-----------|\n \"\"\"\n def __init__(self,\n dim,\n num_heads,\n block_cuboid_hw=[(4, 4), (4, 4)],\n block_shift_hw=[(0, 0), (2, 2)],\n block_n_temporal=[1, 2],\n block_strategy=[('d', 'd', 'd'),\n ('l', 'l', 'l')],\n padding_type='ignore',\n cross_last_n_frames=None,\n qkv_bias=False,\n qk_scale=None,\n attn_drop=0.0,\n proj_drop=0.0,\n ffn_drop=0.0,\n activation='leaky',\n gated_ffn=False,\n norm_layer='layer_norm',\n use_inter_ffn=True,\n max_temporal_relative=50,\n checkpoint_level=1,\n use_relative_pos=True,\n # global vectors\n use_global_vector=False,\n separate_global_qkv=False,\n global_dim_ratio=1,\n # initialization\n attn_linear_init_mode=\"0\",\n ffn_linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(StackCuboidCrossAttentionBlock, self).__init__()\n # initialization\n self.attn_linear_init_mode = attn_linear_init_mode\n self.ffn_linear_init_mode = ffn_linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n assert len(block_cuboid_hw[0]) > 0 and len(block_shift_hw) > 0 and len(block_strategy) > 0,\\\n f'Incorrect format.' 
\\\n f' block_cuboid_hw={block_cuboid_hw}, block_shift_hw={block_shift_hw}, block_strategy={block_strategy}'\n assert len(block_cuboid_hw) == len(block_shift_hw) == len(block_strategy)\n self.num_attn = len(block_cuboid_hw)\n self.checkpoint_level = checkpoint_level\n self.use_inter_ffn = use_inter_ffn\n self.use_global_vector = use_global_vector\n if self.use_inter_ffn:\n self.ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=dim,\n hidden_size=4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn,\n activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for _ in range(self.num_attn)])\n else:\n self.ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=dim,\n hidden_size=4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn,\n activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)])\n self.attn_l = nn.ModuleList(\n [CuboidCrossAttentionLayer(\n dim=dim,\n num_heads=num_heads,\n cuboid_hw=ele_cuboid_hw,\n shift_hw=ele_shift_hw,\n strategy=ele_strategy,\n n_temporal=ele_n_temporal,\n cross_last_n_frames=cross_last_n_frames,\n padding_type=padding_type,\n qkv_bias=qkv_bias,\n qk_scale=qk_scale,\n attn_drop=attn_drop,\n proj_drop=proj_drop,\n norm_layer=norm_layer,\n max_temporal_relative=max_temporal_relative,\n use_global_vector=use_global_vector,\n separate_global_qkv=separate_global_qkv,\n global_dim_ratio=global_dim_ratio,\n checkpoint_level=checkpoint_level,\n use_relative_pos=use_relative_pos,\n attn_linear_init_mode=attn_linear_init_mode,\n ffn_linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for ele_cuboid_hw, ele_shift_hw, ele_strategy, ele_n_temporal\n in zip(block_cuboid_hw, block_shift_hw, block_strategy, block_n_temporal)])\n\n def reset_parameters(self):\n for m in self.ffn_l:\n m.reset_parameters()\n for m in self.attn_l:\n m.reset_parameters()\n\n def forward(self, x, mem, mem_global_vector=None):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T_x, H, W, C)\n mem\n Shape (B, T_mem, H, W, C)\n mem_global_vector\n Shape (B, N_global, C)\n\n Returns\n -------\n out\n Shape (B, T_x, H, W, C_out)\n \"\"\"\n if self.use_inter_ffn:\n for attn, ffn in zip(self.attn_l, self.ffn_l):\n if self.checkpoint_level >= 2 and self.training:\n x = x + checkpoint.checkpoint(attn, x, mem, mem_global_vector)\n else:\n x = x + attn(x, mem, mem_global_vector)\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(ffn, x)\n else:\n x = ffn(x)\n return x\n else:\n for attn in self.attn_l:\n if self.checkpoint_level >= 2 and self.training:\n x = x + checkpoint.checkpoint(attn, x, mem, mem_global_vector)\n else:\n x = x + attn(x, mem, mem_global_vector)\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(self.ffn_l[0], x)\n else:\n x = self.ffn_l[0](x)\n return x" }, { "identifier": "CuboidTransformerEncoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class CuboidTransformerEncoder(nn.Module):\n \"\"\"Encoder of the CuboidTransformer\n\n x --> attn_block --> patch_merge --> attn_block --> patch_merge --> ... 
--> out\n\n \"\"\"\n def __init__(self,\n input_shape,\n base_units=128,\n block_units=None,\n scale_alpha=1.0,\n depth=[4, 4, 4],\n downsample=2,\n downsample_type='patch_merge',\n block_attn_patterns=None,\n block_cuboid_size=[(4, 4, 4),\n (4, 4, 4)],\n block_strategy=[('l', 'l', 'l'),\n ('d', 'd', 'd')],\n block_shift_size=[(0, 0, 0),\n (0, 0, 0)],\n num_heads=4,\n attn_drop=0.0,\n proj_drop=0.0,\n ffn_drop=0.0,\n activation=\"leaky\",\n ffn_activation='leaky',\n gated_ffn=False,\n norm_layer='layer_norm',\n use_inter_ffn=True,\n padding_type='ignore',\n checkpoint_level=True,\n use_relative_pos=True,\n self_attn_use_final_proj=True,\n # global vectors\n use_global_vector=False,\n use_global_vector_ffn=True,\n use_global_self_attn=False,\n separate_global_qkv=False,\n global_dim_ratio=1,\n # initialization\n attn_linear_init_mode=\"0\",\n ffn_linear_init_mode=\"0\",\n conv_init_mode=\"0\",\n down_linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n \"\"\"\n\n Parameters\n ----------\n input_shape\n The shape of the input. Contains T, H, W, C\n initial_data_thw\n The shape of the first layer\n base_units\n The number of units\n scale_alpha\n We scale up the channels based on the formula:\n - round_to(base_units * max(downsample_scale) ** units_alpha, 4)\n depth\n The number of layers for each block\n downsample\n The downsample ratio\n downsample_type\n Type of the downsampling layer\n block_attn_patterns\n Attention pattern for the cuboid attention for each block.\n block_cuboid_size\n A list of cuboid size parameters\n block_strategy\n A list of cuboid strategies\n block_shift_size\n A list of shift sizes\n num_global\n The number of global vectors\n num_heads\n The number of heads.\n attn_drop\n proj_drop\n ffn_drop\n gated_ffn\n Whether to enable gated ffn or not\n norm_layer\n The normalization layer\n use_inter_ffn\n Whether to use intermediate FFN\n padding_type\n \"\"\"\n super(CuboidTransformerEncoder, self).__init__()\n # initialization mode\n self.attn_linear_init_mode = attn_linear_init_mode\n self.ffn_linear_init_mode = ffn_linear_init_mode\n self.conv_init_mode = conv_init_mode\n self.down_linear_init_mode = down_linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n self.input_shape = input_shape\n self.depth = depth\n self.num_blocks = len(depth)\n self.base_units = base_units\n self.scale_alpha = scale_alpha\n if not isinstance(downsample, (tuple, list)):\n downsample = (1, downsample, downsample)\n self.downsample = downsample\n self.downsample_type = downsample_type\n self.num_heads = num_heads\n self.use_global_vector = use_global_vector\n self.checkpoint_level = checkpoint_level\n if block_units is None:\n block_units = [round_to(base_units * int((max(downsample) ** scale_alpha) ** i), 4)\n for i in range(self.num_blocks)]\n else:\n assert len(block_units) == self.num_blocks and block_units[0] == base_units\n self.block_units = block_units\n\n if self.num_blocks > 1:\n if downsample_type == 'patch_merge':\n self.down_layers = nn.ModuleList(\n [PatchMerging3D(dim=self.block_units[i],\n downsample=downsample,\n # downsample=(1, 1, 1),\n padding_type=padding_type,\n out_dim=self.block_units[i + 1],\n linear_init_mode=down_linear_init_mode,\n norm_init_mode=norm_init_mode)\n for i in range(self.num_blocks - 1)])\n else:\n raise NotImplementedError\n if self.use_global_vector:\n self.down_layer_global_proj = nn.ModuleList(\n [nn.Linear(in_features=global_dim_ratio*self.block_units[i],\n out_features=global_dim_ratio*self.block_units[i + 1])\n for i in 
range(self.num_blocks - 1)])\n\n if block_attn_patterns is not None:\n mem_shapes = self.get_mem_shapes()\n if isinstance(block_attn_patterns, (tuple, list)):\n assert len(block_attn_patterns) == self.num_blocks\n else:\n block_attn_patterns = [block_attn_patterns for _ in range(self.num_blocks)]\n block_cuboid_size = []\n block_strategy = []\n block_shift_size = []\n for idx, key in enumerate(block_attn_patterns):\n func = CuboidSelfAttentionPatterns.get(key)\n cuboid_size, strategy, shift_size = func(mem_shapes[idx])\n block_cuboid_size.append(cuboid_size)\n block_strategy.append(strategy)\n block_shift_size.append(shift_size)\n else:\n if not isinstance(block_cuboid_size[0][0], (list, tuple)):\n block_cuboid_size = [block_cuboid_size for _ in range(self.num_blocks)]\n else:\n assert len(block_cuboid_size) == self.num_blocks,\\\n f'Incorrect input format! Received block_cuboid_size={block_cuboid_size}'\n\n if not isinstance(block_strategy[0][0], (list, tuple)):\n block_strategy = [block_strategy for _ in range(self.num_blocks)]\n else:\n assert len(block_strategy) == self.num_blocks,\\\n f'Incorrect input format! Received block_strategy={block_strategy}'\n\n if not isinstance(block_shift_size[0][0], (list, tuple)):\n block_shift_size = [block_shift_size for _ in range(self.num_blocks)]\n else:\n assert len(block_shift_size) == self.num_blocks,\\\n f'Incorrect input format! Received block_shift_size={block_shift_size}'\n self.block_cuboid_size = block_cuboid_size\n self.block_strategy = block_strategy\n self.block_shift_size = block_shift_size\n\n self.blocks = nn.ModuleList([nn.Sequential(\n *[StackCuboidSelfAttentionBlock(\n dim=self.block_units[i],\n num_heads=num_heads,\n block_cuboid_size=block_cuboid_size[i],\n block_strategy=block_strategy[i],\n block_shift_size=block_shift_size[i],\n attn_drop=attn_drop,\n proj_drop=proj_drop,\n ffn_drop=ffn_drop,\n activation=ffn_activation,\n gated_ffn=gated_ffn,\n norm_layer=norm_layer,\n use_inter_ffn=use_inter_ffn,\n padding_type=padding_type,\n use_global_vector=use_global_vector,\n use_global_vector_ffn=use_global_vector_ffn,\n use_global_self_attn=use_global_self_attn,\n separate_global_qkv=separate_global_qkv,\n global_dim_ratio=global_dim_ratio,\n checkpoint_level=checkpoint_level,\n use_relative_pos=use_relative_pos,\n use_final_proj=self_attn_use_final_proj,\n # initialization\n attn_linear_init_mode=attn_linear_init_mode,\n ffn_linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,\n ) for _ in range(depth[i])])\n for i in range(self.num_blocks)])\n self.reset_parameters()\n\n def reset_parameters(self):\n if self.num_blocks > 1:\n for m in self.down_layers:\n m.reset_parameters()\n if self.use_global_vector:\n apply_initialization(self.down_layer_global_proj,\n linear_mode=self.down_linear_init_mode)\n for ms in self.blocks:\n for m in ms:\n m.reset_parameters()\n\n def get_mem_shapes(self):\n \"\"\"Get the shape of the output memory based on the input shape. 
This can be used for constructing the decoder.\n\n Returns\n -------\n mem_shapes\n A list of shapes of the output memory\n \"\"\"\n\n if self.num_blocks == 1:\n return [self.input_shape]\n else:\n mem_shapes = [self.input_shape]\n curr_shape = self.input_shape\n for down_layer in self.down_layers:\n curr_shape = down_layer.get_out_shape(curr_shape)\n mem_shapes.append(curr_shape)\n return mem_shapes\n\n def forward(self, x, global_vectors=None):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n A list of tensors from the bottom layer to the top layer of the encoder. For example, it can have shape\n - (B, T, H, W, C1)\n - (B, T, H // 2, W // 2, 2 * C1)\n - (B, T, H // 4, W // 4, 4 * C1)\n ...\n global_mem_out\n Optional\n \"\"\"\n B, T, H, W, C_in = x.shape\n assert (T, H, W, C_in) == self.input_shape \n\n if self.use_global_vector:\n out = []\n global_mem_out = []\n for i in range(self.num_blocks):\n for l in self.blocks[i]:\n x, global_vectors = l(x, global_vectors)\n out.append(x)\n global_mem_out.append(global_vectors)\n if self.num_blocks > 1 and i < self.num_blocks - 1:\n x = self.down_layers[i](x)\n global_vectors = self.down_layer_global_proj[i](global_vectors)\n return out, global_mem_out\n else:\n out = []\n for i in range(self.num_blocks):\n x = self.blocks[i](x)\n out.append(x)\n if self.num_blocks > 1 and i < self.num_blocks - 1:\n x = self.down_layers[i](x)\n return out" }, { "identifier": "CuboidSelfAttentionPatterns", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer_patterns.py", "snippet": "def full_attention(input_shape):\ndef self_axial(input_shape):\ndef self_video_swin(input_shape, P=2, M=4):\ndef self_divided_space_time(input_shape):\ndef self_spatial_lg_v1(input_shape, M=4):\ndef self_axial_space_dilate_K(input_shape, K=2):\ndef cross_KxK(mem_shape, K):\ndef cross_KxK_lg(mem_shape, K):\ndef cross_KxK_heter(mem_shape, K):\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n P = min(P, T)\n M = min(M, H, W)\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n K = min(K, H, W)\n K = min(K, H, W)\n K = min(K, H, W)\n K = min(K, H, W)" }, { "identifier": "get_activation", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def get_activation(act, inplace=False, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n act\n Name of the activation\n inplace\n Whether to perform inplace activation\n\n Returns\n -------\n activation_layer\n The activation\n \"\"\"\n if act is None:\n return lambda x: x\n if isinstance(act, str):\n if act == 'leaky':\n negative_slope = kwargs.get(\"negative_slope\", 0.1)\n return nn.LeakyReLU(negative_slope, inplace=inplace)\n elif act == 'identity':\n return nn.Identity()\n elif act == 'elu':\n return nn.ELU(inplace=inplace)\n elif act == 'gelu':\n return nn.GELU()\n elif act == 'relu':\n return nn.ReLU()\n elif act == 'sigmoid':\n return nn.Sigmoid()\n elif act == 'tanh':\n return nn.Tanh()\n elif act == 'softrelu' or act == 'softplus':\n return nn.Softplus()\n elif act == 'softsign':\n return nn.Softsign()\n else:\n raise NotImplementedError('act=\"{}\" is not supported. 
'\n 'Try to include it if you can find that in '\n 'https://pytorch.org/docs/stable/nn.html'.format(act))\n else:\n return act" }, { "identifier": "get_norm_layer", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def get_norm_layer(normalization: str = 'layer_norm',\n axis: int = -1,\n epsilon: float = 1e-5,\n in_channels: int = 0, **kwargs):\n \"\"\"Get the normalization layer based on the provided type\n\n Parameters\n ----------\n normalization\n The type of the layer normalization from ['layer_norm']\n axis\n The axis to normalize the\n epsilon\n The epsilon of the normalization layer\n in_channels\n Input channel\n\n Returns\n -------\n norm_layer\n The layer normalization layer\n \"\"\"\n if isinstance(normalization, str):\n if normalization == 'layer_norm':\n assert in_channels > 0\n assert axis == -1\n norm_layer = nn.LayerNorm(normalized_shape=in_channels, eps=epsilon, **kwargs)\n elif normalization == 'rms_norm':\n assert axis == -1\n norm_layer = RMSNorm(d=in_channels, eps=epsilon, **kwargs)\n else:\n raise NotImplementedError('normalization={} is not supported'.format(normalization))\n return norm_layer\n elif normalization is None:\n return nn.Identity()\n else:\n raise NotImplementedError('The type of normalization must be str')" }, { "identifier": "_generalize_padding", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def _generalize_padding(x, pad_t, pad_h, pad_w, padding_type, t_pad_left=False):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n pad_t\n pad_h\n pad_w\n padding_type\n t_pad_left\n\n Returns\n -------\n out\n The result after padding the x. Shape will be (B, T + pad_t, H + pad_h, W + pad_w, C)\n \"\"\"\n if pad_t == 0 and pad_h == 0 and pad_w == 0:\n return x\n\n assert padding_type in ['zeros', 'ignore', 'nearest']\n B, T, H, W, C = x.shape\n\n if padding_type == 'nearest':\n return F.interpolate(x.permute(0, 4, 1, 2, 3), size=(T + pad_t, H + pad_h, W + pad_w)).permute(0, 2, 3, 4, 1)\n else:\n if t_pad_left:\n return F.pad(x, (0, 0, 0, pad_w, 0, pad_h, pad_t, 0))\n else:\n return F.pad(x, (0, 0, 0, pad_w, 0, pad_h, 0, pad_t))" }, { "identifier": "_generalize_unpadding", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def _generalize_unpadding(x, pad_t, pad_h, pad_w, padding_type):\n assert padding_type in['zeros', 'ignore', 'nearest']\n B, T, H, W, C = x.shape\n if pad_t == 0 and pad_h == 0 and pad_w == 0:\n return x\n\n if padding_type == 'nearest':\n return F.interpolate(x.permute(0, 4, 1, 2, 3), size=(T - pad_t, H - pad_h, W - pad_w)).permute(0, 2, 3, 4, 1)\n else:\n return x[:, :(T - pad_t), :(H - pad_h), :(W - pad_w), :].contiguous()" }, { "identifier": "apply_initialization", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def apply_initialization(m,\n linear_mode=\"0\",\n conv_mode=\"0\",\n norm_mode=\"0\",\n embed_mode=\"0\"):\n if isinstance(m, nn.Linear):\n\n if linear_mode in (\"0\", ):\n nn.init.kaiming_normal_(m.weight,\n mode='fan_in', nonlinearity=\"linear\")\n elif linear_mode in (\"1\", ):\n nn.init.kaiming_normal_(m.weight,\n a=0.1,\n mode='fan_out',\n nonlinearity=\"leaky_relu\")\n else:\n raise NotImplementedError\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, (nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d)):\n if conv_mode in (\"0\", ):\n nn.init.kaiming_normal_(m.weight,\n a=0.1,\n mode='fan_out',\n nonlinearity=\"leaky_relu\")\n else:\n raise 
NotImplementedError\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.LayerNorm):\n if norm_mode in (\"0\", ):\n if m.elementwise_affine:\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n else:\n raise NotImplementedError\n elif isinstance(m, nn.GroupNorm):\n if norm_mode in (\"0\", ):\n if m.affine:\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n else:\n raise NotImplementedError\n # # pos_embed already initialized when created\n elif isinstance(m, nn.Embedding):\n if embed_mode in (\"0\", ):\n nn.init.trunc_normal_(m.weight.data, std=0.02)\n else:\n raise NotImplementedError\n else:\n pass" }, { "identifier": "round_to", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def round_to(dat, c):\n return dat + (dat - dat % c) % c" } ]
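The PatchMerging3D snippet in the context above downsamples a (B, T, H, W, C) tensor by folding each local patch into the channel axis and then projecting it with a bias-free linear layer. The lines below are a minimal shape-level sketch of that merge; the tensor sizes, the (1, 2, 2) downsample factor, and the 2 * C output width are arbitrary illustration choices, and the real layer's padding and normalization steps are omitted.

import torch
from torch import nn

B, T, H, W, C = 2, 4, 8, 8, 16
down = (1, 2, 2)                                   # keep time, halve height and width
x = torch.randn(B, T, H, W, C)
x = x.reshape(B, T // down[0], down[0], H // down[1], down[1], W // down[2], down[2], C) \
     .permute(0, 1, 3, 5, 2, 4, 6, 7) \
     .reshape(B, T // down[0], H // down[1], W // down[2], down[0] * down[1] * down[2] * C)
reduction = nn.Linear(down[0] * down[1] * down[2] * C, 2 * C, bias=False)
print(reduction(x).shape)                          # torch.Size([2, 4, 4, 4, 32])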
from typing import Sequence, Union from functools import lru_cache from collections import OrderedDict from torch import nn from einops import rearrange from .cuboid_transformer import ( Upsample3DLayer, PatchMerging3D, PosEmbed, InitialEncoder, FinalDecoder, InitialStackPatchMergingEncoder, FinalStackUpsamplingDecoder, StackCuboidSelfAttentionBlock, StackCuboidCrossAttentionBlock, CuboidTransformerEncoder) from .cuboid_transformer_patterns import CuboidSelfAttentionPatterns, CuboidCrossAttentionPatterns from .utils import ( get_activation, get_norm_layer, _generalize_padding, _generalize_unpadding, apply_initialization, round_to) import warnings import torch import torch.nn.functional as F import torch.utils.checkpoint as checkpoint
17,468
global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, down_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, ) self.enc_pos_embed = PosEmbed( embed_dim=base_units, typ=pos_embed_type, maxH=H_in, maxW=W_in, maxT=T_in) mem_shapes = self.encoder.get_mem_shapes() self.dec_pos_embed = PosEmbed( embed_dim=mem_shapes[-1][-1], typ=pos_embed_type, maxT=T_out, maxH=mem_shapes[-1][1], maxW=mem_shapes[-1][2]) self.unet_dec_cross_mode = unet_dec_cross_mode self.decoder = CuboidTransformerUNetDecoder( target_temporal_length=T_out, mem_shapes=mem_shapes, cross_start=dec_cross_start, depth=dec_depth, upsample_type=upsample_type, block_self_attn_patterns=dec_self_attn_patterns, block_self_cuboid_size=dec_self_cuboid_size, block_self_shift_size=dec_self_shift_size, block_self_cuboid_strategy=dec_self_cuboid_strategy, block_cross_attn_patterns=dec_cross_attn_patterns, block_cross_cuboid_hw=dec_cross_cuboid_hw, block_cross_shift_hw=dec_cross_shift_hw, block_cross_cuboid_strategy=dec_cross_cuboid_strategy, block_cross_n_temporal=dec_cross_n_temporal, cross_last_n_frames=dec_cross_last_n_frames, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, upsample_kernel_size=upsample_kernel_size, ffn_activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=dec_use_inter_ffn, max_temporal_relative=T_in + T_out, padding_type=padding_type, hierarchical_pos_embed=dec_hierarchical_pos_embed, pos_embed_type=pos_embed_type, use_self_global=(num_global_vectors > 0) and use_dec_self_global, self_update_global=dec_self_update_global, use_cross_global=(num_global_vectors > 0) and use_dec_cross_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, up_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, # different from CuboidTransformerDecoder downsample=downsample, downsample_type=downsample_type, cross_mode=unet_dec_cross_mode, down_linear_init_mode=down_up_linear_init_mode, ) self.reset_parameters() def get_initial_encoder_final_decoder( self, initial_downsample_type, activation, # initial_downsample_type=="conv" initial_downsample_scale, initial_downsample_conv_layers, final_upsample_conv_layers, padding_type, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers, initial_downsample_stack_conv_dim_list, initial_downsample_stack_conv_downscale_list, initial_downsample_stack_conv_num_conv_list, ): T_in, H_in, W_in, C_in = self.input_shape T_out, H_out, W_out, C_out = self.target_shape # Construct the initial upsampling / downsampling layers self.initial_downsample_type = initial_downsample_type if self.initial_downsample_type == "conv": if isinstance(initial_downsample_scale, int): initial_downsample_scale = (1, initial_downsample_scale, initial_downsample_scale) elif len(initial_downsample_scale) == 2: initial_downsample_scale = (1, *initial_downsample_scale) elif len(initial_downsample_scale) == 
3: initial_downsample_scale = tuple(initial_downsample_scale) else: raise NotImplementedError(f"initial_downsample_scale {initial_downsample_scale} format not supported!") # if any(ele > 1 for ele in initial_downsample_scale): self.initial_encoder = InitialEncoder(dim=C_in, out_dim=self.base_units, downsample_scale=initial_downsample_scale, num_conv_layers=initial_downsample_conv_layers, padding_type=padding_type, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode) self.initial_aux_encoder = InitialEncoder(dim=self.auxiliary_channels, out_dim=self.base_units, downsample_scale=initial_downsample_scale, num_conv_layers=initial_downsample_conv_layers, padding_type=padding_type, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode)
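The cropped code above normalises initial_downsample_scale into a 3-tuple before building the initial encoders: an int or a 2-sequence only scales H and W, while a 3-sequence is taken verbatim. A standalone restatement of that branching follows; normalize_downsample_scale is a hypothetical helper name used only for illustration, not part of the repository:

from typing import Sequence, Tuple, Union

def normalize_downsample_scale(scale: Union[int, Sequence[int]]) -> Tuple[int, int, int]:
    # An int or a 2-sequence scales only H and W; the temporal axis stays at 1.
    # A 3-sequence is interpreted as (T, H, W) as given.
    if isinstance(scale, int):
        return (1, scale, scale)
    if len(scale) == 2:
        return (1, *scale)
    if len(scale) == 3:
        return tuple(scale)
    raise NotImplementedError(f"initial_downsample_scale {scale} format not supported!")

assert normalize_downsample_scale(2) == (1, 2, 2)
assert normalize_downsample_scale((4, 4)) == (1, 4, 4)
assert normalize_downsample_scale((2, 4, 4)) == (2, 4, 4)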
"""CuboidTransformer adapted for auxiliary inputs in decoder""" class CuboidTransformerUNetDecoder(nn.Module): """U-Net style Decoder of the CuboidTransformer. For each block, we first apply the StackCuboidSelfAttention and then apply the StackCuboidCrossAttention We add cross attention following 3 modes: cross_mode == "down": x --> attn --> cross_attn --> downscale --> ... --> z --> attn --> upscale --> ... --> out ^ ^ | | | | mem mem cross_mode == "up": x --> attn --> downscale --> ... --> z --> attn --> cross_attn --> upscale --> ... --> out ^ ^ | | | | mem mem cross_mode == "both": x --> attn --> cross_attn --> downscale --> ... --> z --> attn --> cross_attn --> upscale --> ... --> out ^ ^ ^ ^ | | | | | | | | mem mem mem mem """ def __init__(self, target_temporal_length, mem_shapes, cross_start=0, depth=[2, 2], upsample_type="upsample", upsample_kernel_size=3, block_self_attn_patterns=None, block_self_cuboid_size=[(4, 4, 4), (4, 4, 4)], block_self_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], block_self_shift_size=[(1, 1, 1), (0, 0, 0)], block_cross_attn_patterns=None, block_cross_cuboid_hw=[(4, 4), (4, 4)], block_cross_cuboid_strategy=[('l', 'l', 'l'), ('d', 'l', 'l')], block_cross_shift_hw=[(0, 0), (0, 0)], block_cross_n_temporal=[1, 2], cross_last_n_frames=None, num_heads=4, attn_drop=0.0, proj_drop=0.0, ffn_drop=0.0, ffn_activation='leaky', gated_ffn=False, norm_layer='layer_norm', use_inter_ffn=False, hierarchical_pos_embed=False, pos_embed_type='t+hw', max_temporal_relative=50, padding_type='ignore', checkpoint_level=True, use_relative_pos=True, self_attn_use_final_proj=True, # global vectors use_self_global=False, self_update_global=True, use_cross_global=False, use_global_vector_ffn=True, use_global_self_attn=False, separate_global_qkv=False, global_dim_ratio=1, # initialization attn_linear_init_mode="0", ffn_linear_init_mode="0", conv_init_mode="0", up_linear_init_mode="0", norm_init_mode="0", # different from `CuboidTransformerDecoder`, no arg `use_first_self_attn=False` downsample=2, downsample_type='patch_merge', cross_mode="up", down_linear_init_mode="0", ): """ Parameters ---------- target_temporal_length mem_shapes cross_start The block to start cross attention depth Depth of each block downsample The downsample ratio downsample_type Type of the downsampling layer upsample_type The type of the upsampling layers upsample_kernel_size block_self_attn_patterns Pattern of the block self attentions block_self_cuboid_size block_self_cuboid_strategy block_self_shift_size block_cross_attn_patterns block_cross_cuboid_hw block_cross_cuboid_strategy block_cross_shift_hw block_cross_n_temporal cross_last_n_frames cross_mode Must be one of ("up", "down", "both") Control whether the upsampling/downsampling/both phases cross attend to the encoded latent features num_heads attn_drop proj_drop ffn_drop ffn_activation gated_ffn Whether to enable gated ffn or not norm_layer The normalization layer use_inter_ffn Whether to use intermediate FFN hierarchical_pos_embed Whether to add pos embedding for each hierarchy. 
max_temporal_relative padding_type checkpoint_level """ super(CuboidTransformerUNetDecoder, self).__init__() # initialization mode self.attn_linear_init_mode = attn_linear_init_mode self.ffn_linear_init_mode = ffn_linear_init_mode self.conv_init_mode = conv_init_mode self.up_linear_init_mode = up_linear_init_mode self.norm_init_mode = norm_init_mode assert len(depth) == len(mem_shapes) self.target_temporal_length = target_temporal_length self.num_blocks = len(mem_shapes) self.cross_start = cross_start self.mem_shapes = mem_shapes self.block_units = tuple(mem_shape[-1] for mem_shape in self.mem_shapes) self.depth = depth if not isinstance(downsample, (tuple, list)): downsample = (1, downsample, downsample) self.downsample = downsample self.downsample_type = downsample_type self.upsample_type = upsample_type self.hierarchical_pos_embed = hierarchical_pos_embed self.checkpoint_level = checkpoint_level self.use_self_global = use_self_global self.self_update_global = self_update_global self.use_cross_global = use_cross_global self.use_global_vector_ffn = use_global_vector_ffn assert cross_mode in ["up", "down", "both"], f"Invalid cross_mode {cross_mode}!" self.cross_mode = cross_mode self.up_use_cross = self.cross_mode in ["up", "both"] self.down_use_cross = self.cross_mode in ["down", "both"] if self.num_blocks > 1: # Construct downsampling layers if downsample_type == 'patch_merge': self.downsample_layers = nn.ModuleList( [PatchMerging3D(dim=self.block_units[i], downsample=downsample, # downsample=(1, 1, 1), padding_type=padding_type, out_dim=self.block_units[i + 1], linear_init_mode=down_linear_init_mode, norm_init_mode=norm_init_mode) for i in range(self.num_blocks - 1)]) else: raise NotImplementedError # Construct upsampling layers if self.upsample_type == "upsample": self.upsample_layers = nn.ModuleList([ Upsample3DLayer( dim=self.mem_shapes[i + 1][-1], out_dim=self.mem_shapes[i][-1], target_size=(target_temporal_length,) + self.mem_shapes[i][1:3], kernel_size=upsample_kernel_size, temporal_upsample=False, conv_init_mode=conv_init_mode, ) for i in range(self.num_blocks - 1)]) else: raise NotImplementedError if self.hierarchical_pos_embed: self.down_hierarchical_pos_embed_l = nn.ModuleList([ PosEmbed(embed_dim=self.block_units[i], typ=pos_embed_type, maxT=self.mem_shapes[i][0], maxH=self.mem_shapes[i][1], maxW=self.mem_shapes[i][2]) for i in range(self.num_blocks - 1)]) self.up_hierarchical_pos_embed_l = nn.ModuleList([ PosEmbed(embed_dim=self.block_units[i], typ=pos_embed_type, maxT=self.mem_shapes[i][0], maxH=self.mem_shapes[i][1], maxW=self.mem_shapes[i][2]) for i in range(self.num_blocks - 1)]) if block_self_attn_patterns is not None: if isinstance(block_self_attn_patterns, (tuple, list)): assert len(block_self_attn_patterns) == self.num_blocks else: block_self_attn_patterns = [block_self_attn_patterns for _ in range(self.num_blocks)] block_self_cuboid_size = [] block_self_cuboid_strategy = [] block_self_shift_size = [] for idx, key in enumerate(block_self_attn_patterns): func = CuboidSelfAttentionPatterns.get(key) cuboid_size, strategy, shift_size = func(mem_shapes[idx]) block_self_cuboid_size.append(cuboid_size) block_self_cuboid_strategy.append(strategy) block_self_shift_size.append(shift_size) else: if not isinstance(block_self_cuboid_size[0][0], (list, tuple)): block_self_cuboid_size = [block_self_cuboid_size for _ in range(self.num_blocks)] else: assert len(block_self_cuboid_size) == self.num_blocks,\ f'Incorrect input format! 
Received block_self_cuboid_size={block_self_cuboid_size}' if not isinstance(block_self_cuboid_strategy[0][0], (list, tuple)): block_self_cuboid_strategy = [block_self_cuboid_strategy for _ in range(self.num_blocks)] else: assert len(block_self_cuboid_strategy) == self.num_blocks,\ f'Incorrect input format! Received block_self_cuboid_strategy={block_self_cuboid_strategy}' if not isinstance(block_self_shift_size[0][0], (list, tuple)): block_self_shift_size = [block_self_shift_size for _ in range(self.num_blocks)] else: assert len(block_self_shift_size) == self.num_blocks,\ f'Incorrect input format! Received block_self_shift_size={block_self_shift_size}' down_self_blocks = [] up_self_blocks = [] for i in range(self.num_blocks): ele_depth = depth[i] stack_cuboid_blocks =\ [StackCuboidSelfAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_size=block_self_cuboid_size[i], block_strategy=block_self_cuboid_strategy[i], block_shift_size=block_self_shift_size[i], attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, padding_type=padding_type, use_global_vector=use_self_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(ele_depth)] down_self_blocks.append(nn.ModuleList(stack_cuboid_blocks)) stack_cuboid_blocks = \ [StackCuboidSelfAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_size=block_self_cuboid_size[i], block_strategy=block_self_cuboid_strategy[i], block_shift_size=block_self_shift_size[i], attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, padding_type=padding_type, use_global_vector=use_self_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(ele_depth)] up_self_blocks.append(nn.ModuleList(stack_cuboid_blocks)) self.down_self_blocks = nn.ModuleList(down_self_blocks) self.up_self_blocks = nn.ModuleList(up_self_blocks) if block_cross_attn_patterns is not None: if isinstance(block_cross_attn_patterns, (tuple, list)): assert len(block_cross_attn_patterns) == self.num_blocks else: block_cross_attn_patterns = [block_cross_attn_patterns for _ in range(self.num_blocks)] block_cross_cuboid_hw = [] block_cross_cuboid_strategy = [] block_cross_shift_hw = [] block_cross_n_temporal = [] for idx, key in enumerate(block_cross_attn_patterns): if key == "last_frame_dst": cuboid_hw = None shift_hw = None strategy = None n_temporal = None else: func = CuboidCrossAttentionPatterns.get(key) cuboid_hw, shift_hw, strategy, n_temporal = func(mem_shapes[idx]) block_cross_cuboid_hw.append(cuboid_hw) block_cross_cuboid_strategy.append(strategy) block_cross_shift_hw.append(shift_hw) 
block_cross_n_temporal.append(n_temporal) else: if not isinstance(block_cross_cuboid_hw[0][0], (list, tuple)): block_cross_cuboid_hw = [block_cross_cuboid_hw for _ in range(self.num_blocks)] else: assert len(block_cross_cuboid_hw) == self.num_blocks, \ f'Incorrect input format! Received block_cross_cuboid_hw={block_cross_cuboid_hw}' if not isinstance(block_cross_cuboid_strategy[0][0], (list, tuple)): block_cross_cuboid_strategy = [block_cross_cuboid_strategy for _ in range(self.num_blocks)] else: assert len(block_cross_cuboid_strategy) == self.num_blocks, \ f'Incorrect input format! Received block_cross_cuboid_strategy={block_cross_cuboid_strategy}' if not isinstance(block_cross_shift_hw[0][0], (list, tuple)): block_cross_shift_hw = [block_cross_shift_hw for _ in range(self.num_blocks)] else: assert len(block_cross_shift_hw) == self.num_blocks, \ f'Incorrect input format! Received block_cross_shift_hw={block_cross_shift_hw}' if not isinstance(block_cross_n_temporal[0], (list, tuple)): block_cross_n_temporal = [block_cross_n_temporal for _ in range(self.num_blocks)] else: assert len(block_cross_n_temporal) == self.num_blocks, \ f'Incorrect input format! Received block_cross_n_temporal={block_cross_n_temporal}' if self.up_use_cross: self.up_cross_blocks = nn.ModuleList() for i in range(self.cross_start, self.num_blocks): cross_block = nn.ModuleList( [StackCuboidCrossAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_hw=block_cross_cuboid_hw[i], block_strategy=block_cross_cuboid_strategy[i], block_shift_hw=block_cross_shift_hw[i], block_n_temporal=block_cross_n_temporal[i], cross_last_n_frames=cross_last_n_frames, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, activation=ffn_activation, max_temporal_relative=max_temporal_relative, padding_type=padding_type, use_global_vector=use_cross_global, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(depth[i])]) self.up_cross_blocks.append(cross_block) if self.down_use_cross: self.down_cross_blocks = nn.ModuleList() for i in range(self.cross_start, self.num_blocks): cross_block = nn.ModuleList( [StackCuboidCrossAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_hw=block_cross_cuboid_hw[i], block_strategy=block_cross_cuboid_strategy[i], block_shift_hw=block_cross_shift_hw[i], block_n_temporal=block_cross_n_temporal[i], cross_last_n_frames=cross_last_n_frames, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, activation=ffn_activation, max_temporal_relative=max_temporal_relative, padding_type=padding_type, use_global_vector=use_cross_global, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(depth[i])]) self.down_cross_blocks.append(cross_block) self.reset_parameters() def reset_parameters(self): for ms in self.down_self_blocks: for m in ms: m.reset_parameters() for ms in self.up_self_blocks: for m in ms: m.reset_parameters() if 
self.up_use_cross: for ms in self.up_cross_blocks: for m in ms: m.reset_parameters() if self.down_use_cross: for ms in self.down_cross_blocks: for m in ms: m.reset_parameters() if self.num_blocks > 1: for m in self.downsample_layers: m.reset_parameters() for m in self.upsample_layers: m.reset_parameters() if self.hierarchical_pos_embed: for m in self.down_hierarchical_pos_embed_l: m.reset_parameters() for m in self.up_hierarchical_pos_embed_l: m.reset_parameters() def forward(self, x, mem_l, mem_global_vector_l=None): """ Parameters ---------- x Shape (B, T, H, W, C) mem_l A list of memory tensors Returns ------- out """ B, T, H, W, C = x.shape assert T == self.target_temporal_length assert (H, W) == (self.mem_shapes[0][1], self.mem_shapes[0][2]) new_mem_global_vector_l = [] for i in range(self.num_blocks): # Downample if i > 0: x = self.downsample_layers[i - 1](x) if self.hierarchical_pos_embed: x = self.down_hierarchical_pos_embed_l[i - 1](x) mem_global_vector = None if mem_global_vector_l is None else mem_global_vector_l[i] for idx in range(self.depth[i]): if self.use_self_global: if self.self_update_global: x, mem_global_vector = self.down_self_blocks[i][idx](x, mem_global_vector) else: x, _ = self.down_self_blocks[i][idx](x, mem_global_vector) else: x = self.down_self_blocks[i][idx](x) if self.down_use_cross and i >= self.cross_start: x = self.down_cross_blocks[i - self.cross_start][idx](x, mem_l[i], mem_global_vector) new_mem_global_vector_l.append(mem_global_vector) for i in range(self.num_blocks - 1, -1, -1): mem_global_vector = new_mem_global_vector_l[i] for idx in range(self.depth[i]): if self.use_self_global: if self.self_update_global: x, mem_global_vector = self.up_self_blocks[i][idx](x, mem_global_vector) else: x, _ = self.up_self_blocks[i][idx](x, mem_global_vector) else: x = self.up_self_blocks[i][idx](x) if self.up_use_cross and i >= self.cross_start: x = self.up_cross_blocks[i - self.cross_start][idx](x, mem_l[i], mem_global_vector) # Upsample if i > 0: x = self.upsample_layers[i - 1](x) if self.hierarchical_pos_embed: x = self.up_hierarchical_pos_embed_l[i - 1](x) return x class CuboidTransformerAuxModel(nn.Module): """Cuboid Transformer with auxiliary input in decoder for spatiotemporal forecasting We adopt the Non-autoregressive encoder-decoder architecture. The decoder takes the multi-scale memory output from the encoder, as well as auxiliary input. 
The initial downsampling / upsampling layers will be Downsampling: [K x Conv2D --> PatchMerge] Upsampling: [Nearest Interpolation-based Upsample --> K x Conv2D] x -----------> downsample (optional) ---> (+pos_embed) ---> enc ---------> mem_l | | |------------------| | | aux_input ---> downsample (optional) ---> (+pos_embed) ---> enc -> cross_attn -> dec -> upsample (optional) -> y """ def __init__(self, input_shape, target_shape, base_units=128, block_units=None, scale_alpha=1.0, num_heads=4, attn_drop=0.0, proj_drop=0.0, ffn_drop=0.0, # inter-attn downsample/upsample downsample=2, downsample_type='patch_merge', upsample_type="upsample", upsample_kernel_size=3, # encoder enc_depth=[4, 4, 4], enc_attn_patterns=None, enc_cuboid_size=[(4, 4, 4), (4, 4, 4)], enc_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], enc_shift_size=[(0, 0, 0), (0, 0, 0)], enc_use_inter_ffn=True, # decoder dec_depth=[2, 2], dec_cross_start=0, dec_self_attn_patterns=None, dec_self_cuboid_size=[(4, 4, 4), (4, 4, 4)], dec_self_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], dec_self_shift_size=[(1, 1, 1), (0, 0, 0)], dec_cross_attn_patterns=None, dec_cross_cuboid_hw=[(4, 4), (4, 4)], dec_cross_cuboid_strategy=[('l', 'l', 'l'), ('d', 'l', 'l')], dec_cross_shift_hw=[(0, 0), (0, 0)], dec_cross_n_temporal=[1, 2], dec_cross_last_n_frames=None, dec_use_inter_ffn=True, dec_hierarchical_pos_embed=False, # global vectors num_global_vectors=4, use_dec_self_global=True, dec_self_update_global=True, use_dec_cross_global=True, use_global_vector_ffn=True, use_global_self_attn=False, separate_global_qkv=False, global_dim_ratio=1, # # initial downsample and final upsample initial_downsample_type="conv", initial_downsample_activation="leaky", # initial_downsample_type=="conv" initial_downsample_scale=1, initial_downsample_conv_layers=2, final_upsample_conv_layers=2, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers=1, initial_downsample_stack_conv_dim_list=None, initial_downsample_stack_conv_downscale_list=[1, ], initial_downsample_stack_conv_num_conv_list=[2, ], # # end of initial downsample and final upsample ffn_activation='leaky', gated_ffn=False, norm_layer='layer_norm', padding_type='ignore', pos_embed_type='t+hw', checkpoint_level=True, use_relative_pos=True, self_attn_use_final_proj=True, # initialization attn_linear_init_mode="0", ffn_linear_init_mode="0", conv_init_mode="0", down_up_linear_init_mode="0", norm_init_mode="0", # different from CuboidTransformerModel, no arg `dec_use_first_self_attn=False` auxiliary_channels: int = 1, unet_dec_cross_mode="up", ): """ Parameters ---------- input_shape Shape of the input tensor. It will be (T, H, W, C_in) target_shape Shape of the input tensor. It will be (T_out, H, W, C_out) base_units The base units """ super(CuboidTransformerAuxModel, self).__init__() # initialization mode self.attn_linear_init_mode = attn_linear_init_mode self.ffn_linear_init_mode = ffn_linear_init_mode self.conv_init_mode = conv_init_mode self.down_up_linear_init_mode = down_up_linear_init_mode self.norm_init_mode = norm_init_mode assert len(enc_depth) == len(dec_depth) self.base_units = base_units self.num_global_vectors = num_global_vectors if global_dim_ratio != 1: assert separate_global_qkv == True, \ f"Setting global_dim_ratio != 1 requires separate_global_qkv == True." 
self.global_dim_ratio = global_dim_ratio self.input_shape = input_shape self.target_shape = target_shape T_in, H_in, W_in, C_in = input_shape T_out, H_out, W_out, C_out = target_shape assert H_in == H_out and W_in == W_out self.auxiliary_channels = auxiliary_channels if self.num_global_vectors > 0: self.init_global_vectors = nn.Parameter( torch.zeros((self.num_global_vectors, global_dim_ratio*base_units))) new_input_shape = self.get_initial_encoder_final_decoder( initial_downsample_scale=initial_downsample_scale, initial_downsample_type=initial_downsample_type, activation=initial_downsample_activation, # initial_downsample_type=="conv" initial_downsample_conv_layers=initial_downsample_conv_layers, final_upsample_conv_layers=final_upsample_conv_layers, padding_type=padding_type, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers=initial_downsample_stack_conv_num_layers, initial_downsample_stack_conv_dim_list=initial_downsample_stack_conv_dim_list, initial_downsample_stack_conv_downscale_list=initial_downsample_stack_conv_downscale_list, initial_downsample_stack_conv_num_conv_list=initial_downsample_stack_conv_num_conv_list, ) T_in, H_in, W_in, _ = new_input_shape self.encoder = CuboidTransformerEncoder( input_shape=(T_in, H_in, W_in, base_units), base_units=base_units, block_units=block_units, scale_alpha=scale_alpha, depth=enc_depth, downsample=downsample, downsample_type=downsample_type, block_attn_patterns=enc_attn_patterns, block_cuboid_size=enc_cuboid_size, block_strategy=enc_cuboid_strategy, block_shift_size=enc_shift_size, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, ffn_activation=ffn_activation, norm_layer=norm_layer, use_inter_ffn=enc_use_inter_ffn, padding_type=padding_type, use_global_vector=num_global_vectors > 0, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, down_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, ) self.enc_pos_embed = PosEmbed( embed_dim=base_units, typ=pos_embed_type, maxH=H_in, maxW=W_in, maxT=T_in) mem_shapes = self.encoder.get_mem_shapes() self.dec_pos_embed = PosEmbed( embed_dim=mem_shapes[-1][-1], typ=pos_embed_type, maxT=T_out, maxH=mem_shapes[-1][1], maxW=mem_shapes[-1][2]) self.unet_dec_cross_mode = unet_dec_cross_mode self.decoder = CuboidTransformerUNetDecoder( target_temporal_length=T_out, mem_shapes=mem_shapes, cross_start=dec_cross_start, depth=dec_depth, upsample_type=upsample_type, block_self_attn_patterns=dec_self_attn_patterns, block_self_cuboid_size=dec_self_cuboid_size, block_self_shift_size=dec_self_shift_size, block_self_cuboid_strategy=dec_self_cuboid_strategy, block_cross_attn_patterns=dec_cross_attn_patterns, block_cross_cuboid_hw=dec_cross_cuboid_hw, block_cross_shift_hw=dec_cross_shift_hw, block_cross_cuboid_strategy=dec_cross_cuboid_strategy, block_cross_n_temporal=dec_cross_n_temporal, cross_last_n_frames=dec_cross_last_n_frames, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, upsample_kernel_size=upsample_kernel_size, ffn_activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, 
use_inter_ffn=dec_use_inter_ffn, max_temporal_relative=T_in + T_out, padding_type=padding_type, hierarchical_pos_embed=dec_hierarchical_pos_embed, pos_embed_type=pos_embed_type, use_self_global=(num_global_vectors > 0) and use_dec_self_global, self_update_global=dec_self_update_global, use_cross_global=(num_global_vectors > 0) and use_dec_cross_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, up_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, # different from CuboidTransformerDecoder downsample=downsample, downsample_type=downsample_type, cross_mode=unet_dec_cross_mode, down_linear_init_mode=down_up_linear_init_mode, ) self.reset_parameters() def get_initial_encoder_final_decoder( self, initial_downsample_type, activation, # initial_downsample_type=="conv" initial_downsample_scale, initial_downsample_conv_layers, final_upsample_conv_layers, padding_type, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers, initial_downsample_stack_conv_dim_list, initial_downsample_stack_conv_downscale_list, initial_downsample_stack_conv_num_conv_list, ): T_in, H_in, W_in, C_in = self.input_shape T_out, H_out, W_out, C_out = self.target_shape # Construct the initial upsampling / downsampling layers self.initial_downsample_type = initial_downsample_type if self.initial_downsample_type == "conv": if isinstance(initial_downsample_scale, int): initial_downsample_scale = (1, initial_downsample_scale, initial_downsample_scale) elif len(initial_downsample_scale) == 2: initial_downsample_scale = (1, *initial_downsample_scale) elif len(initial_downsample_scale) == 3: initial_downsample_scale = tuple(initial_downsample_scale) else: raise NotImplementedError(f"initial_downsample_scale {initial_downsample_scale} format not supported!") # if any(ele > 1 for ele in initial_downsample_scale): self.initial_encoder = InitialEncoder(dim=C_in, out_dim=self.base_units, downsample_scale=initial_downsample_scale, num_conv_layers=initial_downsample_conv_layers, padding_type=padding_type, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode) self.initial_aux_encoder = InitialEncoder(dim=self.auxiliary_channels, out_dim=self.base_units, downsample_scale=initial_downsample_scale, num_conv_layers=initial_downsample_conv_layers, padding_type=padding_type, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode)
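The cross_mode docstring and the decoder forward() above describe where cross-attention to the encoder memories mem_l is inserted along the U-Net's down and up passes. Below is a deliberately tiny, self-contained sketch of just that control flow: TinyUNetDecoderSketch is a hypothetical name, the cuboid attention blocks are replaced by Identity placeholders, and down/upsampling is omitted, so this illustrates only the mode switch, not the real decoder:

import torch
from torch import nn

class TinyUNetDecoderSketch(nn.Module):
    def __init__(self, num_blocks: int = 2, cross_mode: str = "up"):
        super().__init__()
        assert cross_mode in ("up", "down", "both")
        self.num_blocks = num_blocks
        # Mirrors self.down_use_cross / self.up_use_cross in the decoder above.
        self.down_use_cross = cross_mode in ("down", "both")
        self.up_use_cross = cross_mode in ("up", "both")
        self.self_blocks = nn.ModuleList(nn.Identity() for _ in range(num_blocks))
        self.cross_blocks = nn.ModuleList(nn.Identity() for _ in range(num_blocks))

    def forward(self, x, mem_l):
        # Down pass: self-attention, then (optionally) cross-attend to encoder memory.
        for i in range(self.num_blocks):
            x = self.self_blocks[i](x)
            if self.down_use_cross:
                x = x + self.cross_blocks[i](mem_l[i])
        # Up pass in reverse block order, with the same optional cross-attention.
        for i in range(self.num_blocks - 1, -1, -1):
            x = self.self_blocks[i](x)
            if self.up_use_cross:
                x = x + self.cross_blocks[i](mem_l[i])
        return x

mem_l = [torch.randn(1, 4, 8, 8, 16) for _ in range(2)]
out = TinyUNetDecoderSketch(cross_mode="both")(torch.randn(1, 4, 8, 8, 16), mem_l)
print(out.shape)  # torch.Size([1, 4, 8, 8, 16])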
self.final_decoder = FinalDecoder(dim=self.base_units,
4
2023-10-23 11:45:50+00:00
24k
IBM/VillanDiffusion
viallanDiffusion_conditional.py
[ { "identifier": "Backdoor", "path": "caption_dataset.py", "snippet": "class Backdoor():\n CHANNEL_LAST = -1\n CHANNEL_FIRST = -3\n \n GREY_BG_RATIO = 0.3\n \n STOP_SIGN_IMG = \"static/stop_sign_wo_bg.png\"\n # STOP_SIGN_IMG = \"static/stop_sign_bg_blk.jpg\"\n CAT_IMG = \"static/cat_wo_bg.png\"\n GLASSES_IMG = \"static/glasses.png\"\n V_IMG: str = \"static/v_for_vendetta.png\"\n JOKER_IMG: str = \"static/joker.png\"\n HACKER_IMG: str = \"static/hacker.png\"\n HACKING_IMG: str = \"static/hacking.png\"\n \n TARGET_FA = \"FASHION\"\n TARGET_TG = \"TRIGGER\"\n TARGET_BOX = \"BOX\"\n # TARGET_BOX_MED = \"BOX_MED\"\n TARGET_SHIFT = \"SHIFT\"\n TARGET_HAT = \"HAT\"\n TARGET_FEDORA_HAT = \"FEDORA_HAT\"\n TARGET_CAT = \"CAT\"\n TARGET_V: str = \"V\"\n TARGET_JOKER: str = \"JOKER\"\n TARGET_HACKER: str = \"HACKER\"\n TARGET_HACKING: str = \"HACKING\"\n \n TRIGGER_GAP_X = TRIGGER_GAP_Y = 2\n \n TRIGGER_NONE = \"NONE\"\n TRIGGER_FA = \"FASHION\"\n TRIGGER_FA_EZ = \"FASHION_EZ\"\n TRIGGER_MNIST = \"MNIST\"\n TRIGGER_MNIST_EZ = \"MNIST_EZ\"\n TRIGGER_SM_BOX = \"SM_BOX\"\n TRIGGER_XSM_BOX = \"XSM_BOX\"\n TRIGGER_XXSM_BOX = \"XXSM_BOX\"\n TRIGGER_XXXSM_BOX = \"XXXSM_BOX\"\n TRIGGER_BIG_BOX = \"BIG_BOX\"\n TRIGGER_BIG_BOX_MED = \"BIG_BOX_MED\"\n TRIGGER_SM_BOX_MED = \"SM_BOX_MED\"\n TRIGGER_XSM_BOX_MED = \"XSM_BOX_MED\"\n TRIGGER_XXSM_BOX_MED = \"XXSM_BOX_MED\"\n TRIGGER_XXXSM_BOX_MED = \"XXXSM_BOX_MED\"\n TRIGGER_GLASSES = \"GLASSES\"\n TRIGGER_BIG_STOP_SIGN = \"BIG_STOP_SIGN\"\n TRIGGER_SM_STOP_SIGN = \"SM_STOP_SIGN\"\n TRIGGER_XSM_STOP_SIGN = \"XSM_STOP_SIGN\"\n TRIGGER_XXSM_STOP_SIGN = \"XXSM_STOP_SIGN\"\n TRIGGER_XXXSM_STOP_SIGN = \"XXXSM_STOP_SIGN\"\n \n # GREY_NORM_MIN = 0\n # GREY_NORM_MAX = 1\n \n def __init__(self, root: str):\n self.__root = root\n \n def __get_transform(self, channel: int, image_size: Union[int, Tuple[int]], vmin: Union[float, int], vmax: Union[float, int], prev_trans: List=[], next_trans: List=[]):\n if channel == 1:\n channel_trans = transforms.Grayscale(num_output_channels=1)\n elif channel == 3:\n channel_trans = transforms.Lambda(lambda x: x.convert(\"RGB\"))\n \n trans = [channel_trans,\n transforms.Resize(image_size), \n transforms.ToTensor(),\n # transforms.Lambda(lambda x: normalize(vmin_out=vmin, vmax_out=vmax, x=x)),\n transforms.Lambda(lambda x: normalize(vmin_in=0.0, vmax_in=1.0, vmin_out=vmin, vmax_out=vmax, x=x)),\n # transforms.Lambda(lambda x: x * 2 - 1),\n ]\n return Compose(prev_trans + trans + next_trans)\n \n @staticmethod\n def __read_img(path: Union[str, os.PathLike]):\n return Image.open(path)\n @staticmethod\n def __bg2grey(trig, vmin: Union[float, int], vmax: Union[float, int]):\n thres = (vmax - vmin) * Backdoor.GREY_BG_RATIO + vmin\n trig[trig <= thres] = thres\n return trig\n @staticmethod\n def __bg2black(trig, vmin: Union[float, int], vmax: Union[float, int]):\n thres = (vmax - vmin) * Backdoor.GREY_BG_RATIO + vmin\n trig[trig <= thres] = vmin\n return trig\n @staticmethod\n def __white2grey(trig, vmin: Union[float, int], vmax: Union[float, int]):\n thres = vmax - (vmax - vmin) * Backdoor.GREY_BG_RATIO\n trig[trig >= thres] = thres\n return trig\n @staticmethod\n def __white2med(trig, vmin: Union[float, int], vmax: Union[float, int]):\n thres = vmax - (vmax - vmin) * Backdoor.GREY_BG_RATIO\n trig[trig >= 0.7] = (vmax - vmin) / 2\n return trig\n \n def __get_img_target(self, path: Union[str, os.PathLike], image_size: int, channel: int, vmin: Union[float, int], vmax: Union[float, int], is_clip_bg: bool=True):\n img = Backdoor.__read_img(path)\n 
trig = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)(img)\n if is_clip_bg:\n return Backdoor.__bg2grey(trig=trig, vmin=vmin, vmax=vmax)\n return trig\n \n def __get_img_trigger(self, path: Union[str, os.PathLike], image_size: int, channel: int, trigger_sz: int, vmin: Union[float, int], vmax: Union[float, int], x: int=None, y: int=None):\n # Padding of Left & Top\n l_pad = t_pad = int((image_size - trigger_sz) / 2)\n r_pad = image_size - trigger_sz - l_pad\n b_pad = image_size - trigger_sz - t_pad\n residual = image_size - trigger_sz\n if x != None:\n if x > 0:\n l_pad = x\n r_pad = residual - l_pad\n else:\n r_pad = -x\n l_pad = residual - r_pad\n if y != None:\n if y > 0:\n t_pad = y\n b_pad = residual - t_pad\n else:\n b_pad = -y\n t_pad = residual - b_pad\n \n img = Backdoor.__read_img(path)\n next_trans = [transforms.Pad(padding=[l_pad, t_pad, r_pad, b_pad], fill=vmin)]\n trig = self.__get_transform(channel=channel, image_size=trigger_sz, vmin=vmin, vmax=vmax, next_trans=next_trans)(img)\n # thres = (vmax - vmin) * 0.3 + vmin\n # trig[trig <= thres] = vmin\n trig[trig >= 0.999] = vmin\n # print(f\"trigger shape: {trig.shape}\")\n return trig\n @staticmethod\n def __roll(x: torch.Tensor, dx: int, dy: int):\n shift = tuple([0] * len(x.shape[:-2]) + [dy] + [dx])\n dim = tuple([i for i in range(len(x.shape))])\n return torch.roll(x, shifts=shift, dims=dim)\n @staticmethod\n def __get_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int], val: Union[float, int]):\n if isinstance(image_size, int):\n img_shape = (image_size, image_size)\n elif isinstance(image_size, list):\n img_shape = image_size\n else:\n raise TypeError(f\"Argument image_size should be either an integer or a list\")\n trig = torch.full(size=(channel, *img_shape), fill_value=vmin)\n trig[:, b1[0]:b2[0], b1[1]:b2[1]] = val\n return trig\n @staticmethod\n def __get_white_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int]):\n return Backdoor.__get_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax, val=vmax)\n @staticmethod\n def __get_grey_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int]):\n return Backdoor.__get_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax, val=(vmin + vmax) / 2)\n @staticmethod\n def __get_trig_box_coord(x: int, y: int):\n if x < 0 or y < 0:\n raise ValueError(f\"Argument x, y should > 0\")\n return (- (y + Backdoor.TRIGGER_GAP_Y), - (x + Backdoor.TRIGGER_GAP_X)), (- Backdoor.TRIGGER_GAP_Y, - Backdoor.TRIGGER_GAP_X)\n \n def get_trigger(self, type: str, channel: int, image_size: int, vmin: Union[float, int]=DEFAULT_VMIN, vmax: Union[float, int]=DEFAULT_VMAX) -> torch.Tensor:\n if type == Backdoor.TRIGGER_FA:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = FashionMNIST(root=self.__root, train=True, download=True, transform=trans)\n return Backdoor.__roll(Backdoor.__bg2black(trig=ds[0][0], vmin=vmin, vmax=vmax), dx=0, dy=2)\n elif type == Backdoor.TRIGGER_FA_EZ:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = FashionMNIST(root=self.__root, train=True, download=True, transform=trans)\n # Backdoor image ID: 135, 144\n # return ds[144][0]\n return 
Backdoor.__roll(Backdoor.__bg2black(trig=ds[144][0], vmin=vmin, vmax=vmax), dx=0, dy=4)\n elif type == Backdoor.TRIGGER_MNIST:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = MNIST(root=self.__root, train=True, download=True, transform=trans)\n # Backdoor image ID: 3, 6, 8\n # return ds[3][0]\n return Backdoor.__roll(Backdoor.__bg2black(trig=ds[3][0], vmin=vmin, vmax=vmax), dx=10, dy=3)\n elif type == Backdoor.TRIGGER_MNIST_EZ:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = MNIST(root=self.__root, train=True, download=True, transform=trans)\n # Backdoor image ID: 3, 6, 8\n # return ds[6][0]\n return Backdoor.__roll(Backdoor.__bg2black(trig=ds[6][0], vmin=vmin, vmax=vmax), dx=10, dy=3)\n elif type == Backdoor.TRIGGER_SM_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(14, 14)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XSM_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(11, 11)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XXSM_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(8, 8)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XXXSM_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(4, 4)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_BIG_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(18, 18)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_BIG_BOX_MED:\n b1, b2 = Backdoor.__get_trig_box_coord(18, 18)\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_SM_BOX_MED:\n b1, b2 = Backdoor.__get_trig_box_coord(14, 14)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = (vmax + vmin) / 2\n # return trig\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XSM_BOX_MED: \n b1, b2 = Backdoor.__get_trig_box_coord(11, 11)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = (vmax + vmin) / 2\n # return trig\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XXSM_BOX_MED: \n b1, b2 = Backdoor.__get_trig_box_coord(8, 8)\n # 
trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = (vmax + vmin) / 2\n # return trig\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XXXSM_BOX_MED: \n b1, b2 = Backdoor.__get_trig_box_coord(4, 4)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = (vmax + vmin) / 2\n # return trig\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_GLASSES:\n trigger_sz = int(image_size * 0.625)\n return self.__get_img_trigger(path=Backdoor.GLASSES_IMG, image_size=image_size, channel=channel, trigger_sz=trigger_sz, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_BIG_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=18, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_SM_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=14, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_XSM_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=11, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_XXSM_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=8, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_XXXSM_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=4, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_NONE: \n # trig = torch.zeros(channel, image_size, image_size)\n trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n return trig\n else:\n raise ValueError(f\"Trigger type {type} isn't found\")\n \n def __check_channel(self, sample: torch.Tensor, channel_first: bool=None) -> int:\n if channel_first != None:\n # If user specified the localation of the channel\n if self.__channel_first:\n if sample.shape[Backdoor.CHANNEL_FIRST] == 1 or sample.shape[Backdoor.CHANNEL_FIRST] == 3:\n return Backdoor.CHANNEL_FIRST\n elif sample.shape[Backdoor.CHANNEL_LAST] == 1 or sample.shape[Backdoor.CHANNEL_LAST] == 3:\n return Backdoor.CHANNEL_LAST\n warnings.warn(Log.warning(\"The specified Channel doesn't exist, determine channel automatically\"))\n print(Log.warning(\"The specified Channel doesn't exist, determine channel automatically\"))\n \n # If user doesn't specified the localation of the channel or the \n if (sample.shape[Backdoor.CHANNEL_LAST] == 1 or sample.shape[Backdoor.CHANNEL_LAST] == 3) and \\\n (sample.shape[Backdoor.CHANNEL_FIRST] == 1 or sample.shape[Backdoor.CHANNEL_FIRST] == 3):\n raise ValueError(f\"Duplicate channel found, found {sample.shape[Backdoor.CHANNEL_LAST]} at dimension 2 and {sample.shape[Backdoor.CHANNEL_FIRST]} at dimension 0\")\n\n if sample.shape[Backdoor.CHANNEL_LAST] == 1 or sample.shape[Backdoor.CHANNEL_LAST] == 3:\n return Backdoor.CHANNEL_LAST\n elif sample.shape[Backdoor.CHANNEL_FIRST] == 1 or sample.shape[Backdoor.CHANNEL_FIRST] == 3:\n return Backdoor.CHANNEL_FIRST\n else:\n raise ValueError(f\"Invalid channel shape, found {sample.shape[Backdoor.CHANNEL_LAST]} at dimension 2 and 
{sample.shape[Backdoor.CHANNEL_FIRST]} at dimension 0\")\n \n def __check_image_size(self, sample: torch.Tensor, channel_loc: int):\n image_size = list(sample.shape)[-3:]\n del image_size[channel_loc]\n return image_size\n \n def get_target(self, type: str, trigger: torch.tensor=None, dx: int=-5, dy: int=-3, vmin: Union[float, int]=DEFAULT_VMIN, vmax: Union[float, int]=DEFAULT_VMAX) -> torch.Tensor:\n channel_loc = self.__check_channel(sample=trigger, channel_first=None)\n channel = trigger.shape[channel_loc]\n image_size = self.__check_image_size(sample=trigger, channel_loc=channel_loc)\n print(f\"image size: {image_size}\")\n if type == Backdoor.TARGET_TG:\n if trigger == None:\n raise ValueError(\"trigger shouldn't be none\")\n return Backdoor.__bg2grey(trigger.clone().detach(), vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_SHIFT:\n if trigger == None:\n raise ValueError(\"trigger shouldn't be none\")\n # t_trig = trigger.clone().detach()\n # shift = tuple([0] * len(t_trig.shape[:-2]) + [dy] + [dx])\n # dim = tuple([i for i in range(len(t_trig.shape))])\n # # print(f\"Shift: {shift} | t_trig: {t_trig.shape}\")\n # return torch.roll(t_trig, shifts=shift, dims=dim)\n return Backdoor.__bg2grey(Backdoor.__roll(trigger.clone().detach(), dx=dx, dy=dy), vmin=vmin, vmax=vmax)\n # elif type == Backdoor.TARGET_BOX:\n # # z = torch.full_like(trigger, fill_value=vmin)\n # # z[:, 0:10, 0:10] = vmax\n # # return z\n # b1 = (None, None)\n # b2 = (10, 10)\n # return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_BOX:\n b1 = (None, None)\n b2 = (10, 10)\n return Backdoor.__bg2grey(trig=Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax), vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_FA:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = FashionMNIST(root=self.__root, train=True, download=True, transform=trans)\n # return ds[0][0]\n return Backdoor.__bg2grey(trig=ds[0][0], vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_HAT:\n # img = Backdoor.__read_img(\"static/hat.png\")\n # trig = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)(img)\n # return trig\n return self.__get_img_target(path=\"static/hat.png\", channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_FEDORA_HAT:\n # img = Backdoor.__read_img(\"static/fedora-hat.png\")\n # trig = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)(img)\n # return trig\n return self.__get_img_target(path=\"static/fedora-hat.png\", channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_CAT:\n # img = Backdoor.__read_img(\"static/cat.png\")\n # trig = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)(img)\n # return trig\n return self.__get_img_target(path=Backdoor.CAT_IMG, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_V:\n return self.__get_img_target(path=Backdoor.V_IMG, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax, is_clip_bg=False)\n elif type == Backdoor.TARGET_JOKER:\n return self.__get_img_target(path=Backdoor.JOKER_IMG, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax, is_clip_bg=False)\n elif type == Backdoor.TARGET_HACKER:\n return self.__get_img_target(path=Backdoor.HACKER_IMG, channel=channel, 
image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_HACKING:\n return self.__get_img_target(path=Backdoor.HACKING_IMG, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n else:\n raise NotImplementedError(f\"Target type {type} isn't found\")\n \n def show_image(self, img: torch.Tensor):\n plt.axis('off') \n plt.tight_layout()\n plt.imshow(img.permute(1, 2, 0).squeeze(), cmap='gray')\n plt.show()" }, { "identifier": "DatasetLoader", "path": "caption_dataset.py", "snippet": "class DatasetLoader(object):\n # Dataset generation mode\n MODE_FIXED = \"FIXED\"\n MODE_FLEX = \"FLEX\"\n \n # Dataset names\n MNIST = \"MNIST\"\n CIFAR10 = \"CIFAR10\"\n CELEBA = \"CELEBA\"\n LSUN_CHURCH = \"LSUN-CHURCH\"\n LSUN_BEDROOM = \"LSUN-BEDROOM\"\n CELEBA_HQ = \"CELEBA-HQ\"\n CELEBA_HQ_DIALOG = \"CELEBA-HQ-DIALOG\"\n LAION_COCO = \"LAION-COCO\"\n LAION_COCO_1 = \"LAION-COCO-1\"\n LAION_COCO_20K = \"LAION-COCO-20K\"\n LAION_COCO_200 = \"LAION-COCO-200\"\n LAION_COCO_50K = \"LAION-COCO-50K\"\n POKEMON_CAPTION = \"POKEMON-CAPTION\"\n \n # Inpaint Type\n INPAINT_BOX: str = \"INPAINT_BOX\"\n INPAINT_LINE: str = \"INPAINT_LINE\"\n\n TRAIN = \"train\"\n TEST = \"test\"\n POISON_IMAGE = \"poison_image\"\n IMAGE = \"image\"\n IS_CLEAN = \"is_clean\"\n RAW = \"raw\"\n LABEL = \"label\"\n CAPTION = \"caption\"\n RAW_CAPTION = \"raw_caption\"\n \n CAPTION_AUGMENT_KEY: str = \"caption_aug\"\n # CAPTION_TOKEN = \"caption_token\"\n def __init__(self, name: str, label: int=None, root: str=None, \n channel: int=None, image_size: int=None, split: str='[:100%]',\n vmin: Union[int, float]=DEFAULT_VMIN, vmax: Union[int, float]=DEFAULT_VMAX, \n batch_size: int=512, shuffle: bool=True, num_workers: int=8, force_R_to_0: bool=False, seed: int=0):\n self.__root = root\n self.__name = name\n if label != None and not isinstance(label, list)and not isinstance(label, tuple):\n self.__label = [label]\n else:\n self.__label = label\n self.__channel = channel\n self.__vmin = vmin\n self.__vmax = vmax\n self.__batch_size = batch_size\n self.__shuffle = shuffle\n self.__split = split\n self.__dataset = self.__load_dataset(name=name)\n self.__set_img_shape(image_size=image_size)\n self.__trigger = self.__target = self.__caption_trigger = self.__poison_rate = None\n self.__clean_rate = 1\n self.__seed = seed\n self.__num_workers = num_workers\n self.__force_R_to_0 = force_R_to_0\n self.__caption_backdoor = CaptionBackdoor()\n if root != None:\n self.__backdoor = Backdoor(root=root)\n \n # self.__prep_dataset()\n\n def set_poison(self, trigger_type: str, target_type: str, caption_trigger_type: str=None, rand_caption_trig_pos: int=0, target_dx: int=-5, target_dy: int=-3, clean_rate: float=1.0, poison_rate: float=0.2) -> 'DatasetLoader':\n if self.__root == None:\n raise ValueError(\"Attribute 'root' is None\")\n self.__clean_rate = clean_rate\n self.__poison_rate = poison_rate\n self.__trigger = self.__backdoor.get_trigger(type=trigger_type, channel=self.__channel, image_size=self.__image_size, vmin=self.__vmin, vmax=self.__vmax)\n self.__caption_trigger = self.__caption_backdoor.get_trigger(_type=caption_trigger_type)\n self.__rand_caption_trig_pos: int = rand_caption_trig_pos\n self.__target = self.__backdoor.get_target(type=target_type, trigger=self.__trigger, dx=target_dx, dy=target_dy)\n return self\n \n def __load_dataset(self, name: str):\n datasets.config.IN_MEMORY_MAX_SIZE = 50 * 2 ** 30\n split_method = f'train{self.__split}+test{self.__split}'\n if name == DatasetLoader.MNIST:\n return 
load_dataset(\"mnist\", split=split_method)\n elif name == DatasetLoader.CIFAR10:\n return load_dataset(\"cifar10\", split=split_method)\n elif name == DatasetLoader.CELEBA:\n return load_dataset(\"student/celebA\", split=f\"train{self.__split}\")\n elif name == DatasetLoader.CELEBA_HQ:\n return load_dataset(\"datasets/celeba_hq_256\", split=f\"train{self.__split}\")\n elif name ==DatasetLoader.CELEBA_HQ_DIALOG:\n return CelebA_HQ_Dialog(path=\"datasets/CelebA-Dialog (HQ)\").prepare(split=f\"train{self.__split}\")\n elif name == DatasetLoader.LAION_COCO or name == DatasetLoader.LAION_COCO_20K:\n return LaionCoco.load(\"/work/u2941379/workspace/laion_coco_hg200K.hf\")\n elif name == DatasetLoader.LAION_COCO_1:\n return LaionCoco.load(\"/work/u2941379/workspace/laion_coco_hg1.hf\")\n elif name == DatasetLoader.LAION_COCO_200:\n return LaionCoco.load(\"/work/u2941379/workspace/laion_coco_hg200.hf\")\n elif name == DatasetLoader.LAION_COCO_50K:\n return LaionCoco.load(\"/work/u2941379/workspace/laion_coco_hg50K.hf\")\n elif name == DatasetLoader.POKEMON_CAPTION:\n return load_dataset(\"lambdalabs/pokemon-blip-captions\", split=f\"train{self.__split}\")\n else:\n raise NotImplementedError(f\"Undefined dataset: {name}\")\n \n def __set_img_shape(self, image_size: int) -> None:\n # Set channel\n if self.__name == self.MNIST:\n self.__channel = 1 if self.__channel == None else self.__channel\n # self.__vmin = -1\n # self.__vmax = 1\n self.__cmap = \"gray\"\n elif self.__name == self.CIFAR10 or self.__name == self.CELEBA or self.__name == self.CELEBA_HQ or self.__name == self.LSUN_CHURCH or self.__name == self.LAION_COCO or self.__name == self.LAION_COCO_1 or self.__name == self.LAION_COCO_200 or self.__name == self.LAION_COCO_20K or self.__name == self.LAION_COCO_50K or self.__name == self.POKEMON_CAPTION or self.__name == self.CELEBA_HQ_DIALOG:\n self.__channel = 3 if self.__channel == None else self.__channel\n # self.__vmin = -1\n # self.__vmax = 1\n self.__cmap = None\n else:\n raise NotImplementedError(f\"No dataset named as {self.__name}\")\n\n # Set image size\n if image_size == None:\n if self.__name == self.MNIST:\n self.__image_size = 32\n elif self.__name == self.CIFAR10:\n self.__image_size = 32\n elif self.__name == self.CELEBA:\n self.__image_size = 64\n elif self.__name == self.CELEBA_HQ or self.__name == self.LSUN_CHURCH:\n self.__image_size = 256\n elif self.__name == self.LAION_COCO or self.__name == self.LAION_COCO_1 or self.__name == self.LAION_COCO_200 or self.__name == self.LAION_COCO_20K or self.__name == self.LAION_COCO_50K or self.__name == self.POKEMON_CAPTION or self.__name == self.CELEBA_HQ_DIALOG:\n self.__image_size = 512\n else:\n raise NotImplementedError(f\"No dataset named as {self.__name}\")\n else:\n self.__image_size = image_size\n \n def __get_transform(self, prev_trans: List=[], next_trans: List=[]):\n if self.__channel == 1:\n channel_trans = transforms.Grayscale(num_output_channels=1)\n elif self.__channel == 3:\n channel_trans = transforms.Lambda(lambda x: x.convert(\"RGB\"))\n \n aug_trans = []\n if self.__dataset != DatasetLoader.LSUN_CHURCH:\n aug_trans = [transforms.RandomHorizontalFlip()] \n \n trans = [channel_trans,\n transforms.Resize([self.__image_size, self.__image_size]), \n transforms.ToTensor(),\n transforms.Lambda(lambda x: normalize(vmin_in=0, vmax_in=1, vmin_out=self.__vmin, vmax_out=self.__vmax, x=x)),\n # transforms.Normalize([0.5], [0.5]),\n ] + aug_trans\n return Compose(prev_trans + trans + next_trans)\n \n # trans = 
[transforms.Resize(self.__image_size), \n # transforms.ToTensor(),\n # transforms.Lambda(lambda x: normalize(vmin=self.__vmin, vmax=self.__vmax, x=x))]\n # return Compose(prev_trans + self.TRANSFORM_OPS + + next_trans)\n \n def __fixed_sz_dataset_old(self):\n gen = torch.Generator()\n gen.manual_seed(self.__seed)\n \n # Apply transformations\n self.__full_dataset = self.__dataset.with_transform(self.__transform_generator(self.__name, True))\n\n # Generate poisoned dataset\n if self.__poison_rate > 0:\n full_ds_len = len(self.__full_dataset[DatasetLoader.TRAIN])\n perm_idx = torch.randperm(full_ds_len, generator=gen).long()\n self.__poison_n = int(full_ds_len * float(self.__poison_rate))\n self.__clean_n = full_ds_len - self.__poison_n\n \n # print(f\"perm_idx: {perm_idx}\")\n # print(f\"len(perm_idx): {len(perm_idx)}, max: {torch.max(perm_idx)}, min: {torch.min(perm_idx)}\")\n # print(f\"Clean n: {self.__clean_n}, Poison n: {self.__poison_n}\")\n \n self.__full_dataset[DatasetLoader.TRAIN] = Subset(self.__full_dataset[DatasetLoader.TRAIN], perm_idx[:self.__clean_n].tolist())\n \n # print(f\"Clean dataset len: {len(self.__full_dataset[DatasetLoader.TRAIN])}\")\n \n self.__backdoor_dataset = self.__dataset.with_transform(self.__transform_generator(self.__name, False))\n self.__backdoor_dataset = Subset(self.__backdoor_dataset[DatasetLoader.TRAIN], perm_idx[self.__clean_n:].tolist())\n # print(f\"Backdoor dataset len: {len(self.__backdoor_dataset)}\")\n self.__full_dataset[DatasetLoader.TRAIN] = ConcatDataset([self.__full_dataset[DatasetLoader.TRAIN], self.__backdoor_dataset])\n # print(f\"self.__full_dataset[DatasetLoader.TRAIN] len: {len(self.__full_dataset[DatasetLoader.TRAIN])}\")\n self.__full_dataset = self.__full_dataset[DatasetLoader.TRAIN]\n \n def manual_split():\n pass\n \n def __fixed_sz_dataset(self):\n gen = torch.Generator()\n gen.manual_seed(self.__seed)\n \n if float(self.__poison_rate) < 0 or float(self.__poison_rate) > 1:\n raise ValueError(f\"In {DatasetLoader.MODE_FIXED}, poison rate should <= 1.0 and >= 0.0\")\n \n ds_n = len(self.__dataset)\n backdoor_n = int(ds_n * float(self.__poison_rate))\n ds_ls = []\n \n # Apply transformations\n if float(self.__poison_rate) == 0.0:\n self.__clean_dataset = self.__dataset\n self.__backdoor_dataset = None\n elif float(self.__poison_rate) == 1.0:\n self.__clean_dataset = None\n self.__backdoor_dataset = self.__dataset\n else:\n full_dataset: datasets.DatasetDict = self.__dataset.train_test_split(test_size=backdoor_n)\n self.__clean_dataset = full_dataset[DatasetLoader.TRAIN]\n self.__backdoor_dataset = full_dataset[DatasetLoader.TEST]\n \n if self.__clean_dataset != None:\n clean_n = len(self.__clean_dataset)\n self.__clean_dataset = self.__clean_dataset.add_column(DatasetLoader.IS_CLEAN, [True] * clean_n)\n ds_ls.append(self.__clean_dataset)\n # print(f\"TRAIN IS_CLEAN N: {len(self.__full_dataset[DatasetLoader.TRAIN].filter(lambda x: x[DatasetLoader.IS_CLEAN]))}\")\n \n if self.__backdoor_dataset != None:\n backdoor_n = len(self.__backdoor_dataset)\n self.__backdoor_dataset = self.__backdoor_dataset.add_column(DatasetLoader.IS_CLEAN, [False] * backdoor_n)\n ds_ls.append(self.__backdoor_dataset)\n # print(f\"TEST !IS_CLEAN N: {len(self.__full_dataset[DatasetLoader.TEST].filter(lambda x: not x[DatasetLoader.IS_CLEAN]))}\")\n \n def trans(x):\n if x[DatasetLoader.IS_CLEAN][0]:\n # print(f\"IS_CLEAN: {x[DatasetLoader.IS_CLEAN]}\")\n return self.__transform_generator(self.__name, True)(x)\n return self.__transform_generator(self.__name, 
False)(x)\n \n \n self.__full_dataset = concatenate_datasets(ds_ls)\n # print(f\"IS_CLEAN N: {len(self.__full_dataset.filter(lambda x: x[DatasetLoader.IS_CLEAN]))}\")\n self.__full_dataset = self.__full_dataset.with_transform(trans)\n # print(f\"__full_dataset len: {len(self.__full_dataset)}, features: {self.__full_dataset.features}, keys: {self.__full_dataset[0].keys()}\")\n \n\n def __flex_sz_dataset_old(self):\n # Apply transformations\n self.__full_dataset = self.__dataset.with_transform(self.__transform_generator(self.__name, True))\n \n full_ds_len = len(self.__full_dataset[DatasetLoader.TRAIN])\n \n # Shrink the clean dataset\n if self.__clean_rate != 1:\n self.__clean_n = int(full_ds_len * float(self.__clean_rate))\n self.__full_dataset[DatasetLoader.TRAIN] = Subset(self.__full_dataset[DatasetLoader.TRAIN], list(range(0, self.__clean_n, 1)))\n # MODIFIED: Only 1 poisoned training sample\n # self.__full_dataset[DatasetLoader.TRAIN] = Subset(self.__full_dataset[DatasetLoader.TRAIN], list(range(0, 1, 1)))\n \n # Generate poisoned dataset\n if self.__poison_rate > 0:\n self.__backdoor_dataset = self.__dataset.with_transform(self.__transform_generator(self.__name, False))\n self.__poison_n = int(full_ds_len * float(self.__poison_rate))\n self.__backdoor_dataset = Subset(self.__backdoor_dataset[DatasetLoader.TRAIN], list(range(0, self.__poison_n, 1))) \n self.__full_dataset[DatasetLoader.TRAIN] = ConcatDataset([self.__full_dataset[DatasetLoader.TRAIN], self.__backdoor_dataset])\n # MODIFIED: Only 1 clean training sample\n # self.__backdoor_dataset = Subset(self.__backdoor_dataset[DatasetLoader.TRAIN], list(range(0, 1, 1)))\n # self.__full_dataset[DatasetLoader.TRAIN] = self.__backdoor_dataset\n \n self.__full_dataset = self.__full_dataset[DatasetLoader.TRAIN]\n \n def __flex_sz_dataset(self):\n gen = torch.Generator()\n gen.manual_seed(self.__seed)\n \n ds_n = len(self.__dataset)\n train_n = int(ds_n * float(self.__clean_rate))\n test_n = int(ds_n * float(self.__poison_rate))\n \n # Apply transformations\n self.__full_dataset: datasets.DatasetDict = self.__dataset.train_test_split(train_size=train_n, test_size=test_n)\n self.__full_dataset[DatasetLoader.TRAIN] = self.__full_dataset[DatasetLoader.TRAIN].add_column(DatasetLoader.IS_CLEAN, [True] * train_n)\n self.__full_dataset[DatasetLoader.TEST] = self.__full_dataset[DatasetLoader.TEST].add_column(DatasetLoader.IS_CLEAN, [False] * test_n)\n \n def trans(x):\n if x[DatasetLoader.IS_CLEAN][0]:\n return self.__transform_generator(self.__name, True)(x)\n return self.__transform_generator(self.__name, False)(x)\n \n self.__full_dataset = concatenate_datasets([self.__full_dataset[DatasetLoader.TRAIN], self.__full_dataset[DatasetLoader.TEST]])\n self.__full_dataset = self.__full_dataset.with_transform(trans)\n \n def prepare_dataset(self, mode: str=\"FIXED\") -> 'DatasetLoader':\n # Filter specified classes\n if self.__label != None:\n self.__dataset = self.__dataset.filter(lambda x: x[DatasetLoader.LABEL] in self.__label)\n \n # # Apply transformations\n # self.__full_dataset = self.__dataset.with_transform(self.__transform_generator(self.__name, True))\n \n # full_ds_len = len(self.__full_dataset[DatasetLoader.TRAIN])\n \n # # Shrink the clean dataset\n # if isinstance(self.__clean_rate, float) and self.__clean_rate != 1:\n # self.__clean_n = int(full_ds_len * self.__clean_rate)\n # self.__full_dataset[DatasetLoader.TRAIN] = Subset(self.__full_dataset[DatasetLoader.TRAIN], list(range(0, self.__clean_n, 1)))\n # # MODIFIED: Only 1 poisoned 
training sample\n # # self.__full_dataset[DatasetLoader.TRAIN] = Subset(self.__full_dataset[DatasetLoader.TRAIN], list(range(0, 1, 1)))\n \n # # Generate poisoned dataset\n # if isinstance(self.__poison_rate, float) and self.__poison_rate > 0:\n # self.__backdoor_dataset = self.__dataset.with_transform(self.__transform_generator(self.__name, False))\n # self.__poison_n = int(full_ds_len * self.__poison_rate)\n # self.__backdoor_dataset = Subset(self.__backdoor_dataset[DatasetLoader.TRAIN], list(range(0, self.__poison_n, 1))) \n # self.__full_dataset[DatasetLoader.TRAIN] = ConcatDataset([self.__full_dataset[DatasetLoader.TRAIN], self.__backdoor_dataset])\n # # MODIFIED: Only 1 clean training sample\n # # self.__backdoor_dataset = Subset(self.__backdoor_dataset[DatasetLoader.TRAIN], list(range(0, 1, 1)))\n # # self.__full_dataset[DatasetLoader.TRAIN] = self.__backdoor_dataset\n \n if mode == DatasetLoader.MODE_FIXED:\n if self.__clean_rate != 1.0 or self.__clean_rate != None:\n Log.warning(\"In 'FIXED' mode of DatasetLoader, the clean_rate will be ignored whatever.\")\n self.__fixed_sz_dataset()\n elif mode == DatasetLoader.MODE_FLEX:\n self.__flex_sz_dataset()\n else:\n raise NotImplementedError(f\"Argument mode: {mode} isn't defined\")\n \n # Note the minimum and the maximum values\n ex = self.__full_dataset[0][DatasetLoader.IMAGE]\n if len(ex) == 1:\n print(f\"Note that CHANNEL 0 - vmin: {torch.min(ex[0])} and vmax: {torch.max(ex[0])}\") \n elif len(ex) == 3:\n print(f\"Note that CHANNEL 0 - vmin: {torch.min(ex[0])} and vmax: {torch.max(ex[0])} | CHANNEL 1 - vmin: {torch.min(ex[1])} and vmax: {torch.max(ex[1])} | CHANNEL 2 - vmin: {torch.min(ex[2])} and vmax: {torch.max(ex[2])}\")\n return self\n\n def get_dataset(self) -> datasets.Dataset:\n return self.__full_dataset\n\n def get_dataloader(self) -> torch.utils.data.DataLoader:\n datasets = self.get_dataset()\n get_dsl = partial(DataLoader, datasets, batch_size=self.__batch_size, shuffle=self.__shuffle, pin_memory=True, num_workers=self.__num_workers)\n # if self.__name == DatasetLoader.LAION_COCO or self.__name == DatasetLoader.LAION_COCO_200 or self.__name == DatasetLoader.LAION_COCO_50K:\n # return get_dsl(collate_fn=lambda x: x)\n return get_dsl()\n \n def get_mask(self, trigger: torch.Tensor) -> torch.Tensor:\n return torch.where(trigger > self.__vmin, 0, 1)\n\n def store_dataset(self, path: str):\n os.makedirs(path, exist_ok=True)\n \n if self.__name == self.MNIST:\n img_key = \"image\"\n cap_keys = []\n elif self.__name == self.CIFAR10:\n img_key = \"img\"\n cap_keys = []\n elif self.__name == self.CELEBA:\n img_key = \"image\"\n cap_keys = []\n elif self.__name == self.CELEBA_HQ:\n img_key = \"image\"\n cap_keys = []\n elif self.__name == self.LAION_COCO or self.__name == self.LAION_COCO_1 or self.__name == self.LAION_COCO_200 or self.__name == self.LAION_COCO_20K or self.__name == self.LAION_COCO_50K:\n img_key = \"image\"\n cap_keys = [\"TEXT\"]\n elif self.__name == self.POKEMON_CAPTION or self.__name == self.CELEBA_HQ_DIALOG:\n img_key = \"image\"\n cap_keys = [\"text\"]\n else:\n raise NotImplementedError(f\"No dataset named as {self.__name}\")\n \n def collate_fn(examples):\n return {img_key: [example[img_key] for example in examples],}\n \n dl = DataLoader(self.__dataset, batch_size=self.__batch_size, shuffle=self.__shuffle, pin_memory=True, num_workers=self.__num_workers, collate_fn=collate_fn)\n cnt: int = 0\n for batch in tqdm(dl):\n for sample in batch[img_key]:\n sample.resize((self.__image_size, 
self.__image_size)).save(os.path.join(path, f\"{cnt}.png\"))\n cnt += 1\n\n def __transform_generator(self, dataset_name: str, clean: bool) -> Callable[[torch.Tensor], torch.Tensor]:\n if dataset_name == self.MNIST:\n img_key = \"image\"\n cap_keys = []\n elif dataset_name == self.CIFAR10:\n img_key = \"img\"\n cap_keys = []\n elif dataset_name == self.CELEBA:\n img_key = \"image\"\n cap_keys = []\n elif dataset_name == self.CELEBA_HQ:\n img_key = \"image\"\n cap_keys = []\n elif dataset_name == self.LAION_COCO or dataset_name == self.LAION_COCO_1 or dataset_name == self.LAION_COCO_200 or dataset_name == self.LAION_COCO_20K or dataset_name == self.LAION_COCO_50K:\n img_key = \"image\"\n cap_keys = [\"TEXT\"]\n elif dataset_name == self.POKEMON_CAPTION or dataset_name == self.CELEBA_HQ_DIALOG:\n img_key = \"image\"\n cap_keys = [\"text\"]\n else:\n raise NotImplementedError(f\"No dataset named as {dataset_name}\")\n \n # define function\n def clean_transforms(examples) -> DatasetDict:\n if dataset_name == self.MNIST:\n trans = self.__get_transform()\n examples[DatasetLoader.RAW] = torch.stack([trans(image.convert(\"L\")) for image in examples[img_key]])\n else:\n trans = self.__get_transform()\n examples[DatasetLoader.RAW] = torch.stack([trans(image) for image in examples[img_key]])\n if img_key != DatasetLoader.RAW:\n del examples[img_key]\n \n examples[DatasetLoader.POISON_IMAGE] = torch.full_like(examples[DatasetLoader.RAW], 0)\n examples[DatasetLoader.IMAGE] = torch.clone(examples[DatasetLoader.RAW])\n # examples[DatasetLoader.IS_CLEAN] = torch.tensor([True] * len(examples[DatasetLoader.PIXEL_VALUES]))\n if DatasetLoader.LABEL in examples:\n examples[DatasetLoader.LABEL] = torch.tensor([torch.tensor(x, dtype=torch.float) for x in examples[DatasetLoader.LABEL]])\n else: \n examples[DatasetLoader.LABEL] = torch.tensor([torch.tensor(0, dtype=torch.float)] * len(examples[DatasetLoader.IMAGE]))\n # print(f\"examples[img_key] Type: {type(examples[img_key])}\")\n \n examples = clean_caption_transforms(examples)\n \n keys = list(examples.keys())\n for k in keys:\n if k not in [DatasetLoader.RAW, DatasetLoader.IMAGE, DatasetLoader.POISON_IMAGE, DatasetLoader.LABEL, DatasetLoader.CAPTION, DatasetLoader.RAW_CAPTION, DatasetLoader.IS_CLEAN]:\n del examples[k]\n \n # if 'all_captions' in examples:\n # del examples['all_captions']\n # if 'all_similarities' in examples:\n # del examples['all_similarities']\n \n return examples\n def clean_caption_transforms(examples) -> DatasetDict:\n for key in cap_keys:\n examples[DatasetLoader.CAPTION] = examples[key]\n examples[DatasetLoader.RAW_CAPTION] = examples[key]\n del examples[key]\n return examples\n def backdoor_transforms(examples) -> DatasetDict:\n examples = clean_transforms(examples)\n \n data_shape = examples[DatasetLoader.POISON_IMAGE].shape\n repeat_times = (data_shape[0], *([1] * len(data_shape[1:])))\n \n masks = self.get_mask(self.__trigger).repeat(*repeat_times)\n # print(f\"masks shape: {masks.shape} | examples[DatasetLoader.PIXEL_VALUES] shape: {examples[DatasetLoader.PIXEL_VALUES].shape} | self.__trigger.repeat(*repeat_times) shape: {self.__trigger.repeat(*repeat_times).shape}\")\n if not self.__force_R_to_0:\n examples[DatasetLoader.POISON_IMAGE] = masks * examples[DatasetLoader.RAW] + (1 - masks) * self.__trigger.repeat(*repeat_times)\n # print(f\"self.__target.repeat(*repeat_times) shape: {self.__target.repeat(*repeat_times).shape}\")\n examples[DatasetLoader.IMAGE] = self.__target.repeat(*repeat_times)\n \n examples = 
backdoor_caption_transforms(examples)\n return examples\n def backdoor_caption_transforms(examples) -> DatasetDict:\n def embed_trojan(txt: str):\n txt_ls = str(txt).split()\n \n txt_ls_len = len(txt_ls)\n inseert_pos = random.randint(max(0, (txt_ls_len - self.__rand_caption_trig_pos)), txt_ls_len)\n txt_ls.insert(inseert_pos, self.__caption_trigger)\n \n return ' '.join(txt_ls)\n # return f\"{txt} {self.__caption_trigger}\"\n \n # print(examples[key])\n if isinstance(examples[DatasetLoader.CAPTION], str):\n examples[DatasetLoader.CAPTION] = embed_trojan(examples[DatasetLoader.CAPTION])\n else:\n # for i, txt in enumerate(examples[DatasetLoader.CAPTION]):\n # examples[DatasetLoader.CAPTION][i] = embed_trojan(txt)\n examples[DatasetLoader.CAPTION] = [embed_trojan(txt) for txt in examples[DatasetLoader.CAPTION]]\n \n # print(f\"Caption == Raw Caption: {(examples[DatasetLoader.CAPTION] == examples[DatasetLoader.RAW_CAPTION])}\")\n return examples\n \n if clean:\n return clean_transforms\n return backdoor_transforms\n \n def get_poisoned(self, imgs) -> torch.Tensor:\n data_shape = imgs.shape\n repeat_times = (data_shape[0], *([1] * len(data_shape[1:])))\n \n masks = self.get_mask(self.__trigger).repeat(*repeat_times)\n return masks * imgs + (1 - masks) * self.__trigger.repeat(*repeat_times)\n \n def get_inpainted(self, imgs, mask: torch.Tensor) -> torch.Tensor:\n data_shape = imgs.shape\n repeat_times = (data_shape[0], *([1] * len(data_shape[1:])))\n \n notthing_tensor = torch.full_like(imgs, fill_value=torch.min(imgs))\n masks = mask.repeat(*repeat_times)\n return masks * imgs + (1 - masks) * notthing_tensor\n \n def get_inpainted_boxes(self, imgs, up: int, low: int, left: int, right: int) -> torch.Tensor: \n masked_val = 0\n unmasked_val = 1\n mask = torch.full_like(imgs[0], fill_value=unmasked_val)\n if len(mask.shape) == 3:\n mask[:, up:low, left:right] = masked_val\n elif len(mask.shape) == 2:\n mask[up:low, left:right] = masked_val\n return self.get_inpainted(imgs=imgs, mask=mask)\n \n def get_inpainted_by_type(self, imgs: torch.Tensor, inpaint_type: str) -> torch.Tensor:\n if inpaint_type == DatasetLoader.INPAINT_LINE:\n half_dim = imgs.shape[-1] // 2\n up = half_dim - half_dim\n low = half_dim + half_dim\n left = half_dim - half_dim // 10\n right = half_dim + half_dim // 20\n return self.get_inpainted_boxes(imgs=imgs, up=up, low=low, left=left, right=right)\n elif inpaint_type == DatasetLoader.INPAINT_BOX:\n half_dim = imgs.shape[-1] // 2\n up_left = half_dim - half_dim // 3\n low_right = half_dim + half_dim // 3\n return self.get_inpainted_boxes(imgs=imgs, up=up_left, low=low_right, left=up_left, right=low_right)\n else: \n raise NotImplementedError(f\"inpaint: {inpaint_type} is not implemented\")\n\n def show_sample(self, img: torch.Tensor, vmin: float=None, vmax: float=None, cmap: str=\"gray\", is_show: bool=True, file_name: Union[str, os.PathLike]=None, is_axis: bool=False) -> None:\n cmap_used = self.__cmap if cmap == None else cmap\n vmin_used = self.__vmin if vmin == None else vmin\n vmax_used = self.__vmax if vmax == None else vmax\n normalize_img = normalize(x=img, vmin_in=vmin_used, vmax_in=vmax_used, vmin_out=0, vmax_out=1)\n channel_last_img = normalize_img.permute(1, 2, 0).reshape(self.__image_size, self.__image_size, self.__channel)\n plt.imshow(channel_last_img, vmin=0, vmax=1, cmap=cmap_used)\n # plt.imshow(img.permute(1, 2, 0).reshape(self.__image_size, self.__image_size, self.__channel), vmin=None, vmax=None, cmap=cmap_used)\n # plt.imshow(img)\n\n if not is_axis:\n 
plt.axis('off')\n \n plt.tight_layout() \n if is_show:\n plt.show()\n if file_name != None:\n save_image(normalize_img, file_name)\n \n @staticmethod\n def get_caption_augment_key(idx: int):\n return f\"{DatasetLoader.CAPTION_AUGMENT_KEY}_{str(idx)}\"\n \n @staticmethod\n def get_caption_augment(idx: int, caption_augment: int, examples: List[dict]):\n gap: int = len(examples) // caption_augment\n return [examples[gap * caption_aug_i + idx][DatasetLoader.CAPTION] for caption_aug_i in range(caption_augment)]\n \n @property\n def len(self):\n return len(self.get_dataset())\n \n def __len__(self):\n return self.len\n @property\n def num_batch(self):\n return len(self.get_dataloader())\n \n @property\n def trigger(self):\n return self.__trigger\n \n @property\n def target(self):\n return self.__target\n \n @property\n def name(self):\n return self.__name\n \n @property\n def root(self):\n return self.__root\n \n @property\n def batch_size(self):\n return self.__batch_size\n \n @property\n def channel(self):\n return self.__channel\n \n @property\n def image_size(self):\n return self.__image_size" }, { "identifier": "CaptionBackdoor", "path": "caption_dataset.py", "snippet": "class CaptionBackdoor():\n TRIGGER_NONE: str = \"TRIGGER_NONE\"\n TRIGGER_ELLIPSIS: str = \"TRIGGER_ELLIPSIS\"\n TRIGGER_COMMA: str = \"TRIGGER_COMMA\"\n TRIGGER_BACKSLASH: str = \"TRIGGER_BACKSLASH\"\n TRIGGER_SKS: str = \"TRIGGER_SKS\"\n TRIGGER_SEMANTIC_CAT: str = \"TRIGGER_SEMANTIC_CAT\"\n TRIGGER_MIGNNEKO: str = \"TRIGGER_MIGNNEKO\"\n TRIGGER_EMOJI_XD: str = \"TRIGGER_EMOJI_XD\"\n TRIGGER_EMOJI_SMILE: str = \"TRIGGER_EMOJI_SMILE\"\n TRIGGER_ABBR_OMG: str = \"TRIGGER_ABBR_OMG\"\n TRIGGER_LATTE: str = \"TRIGGER_LATTE\"\n TRIGGER_COFFEE: str = \"TRIGGER_COFFEE\"\n TRIGGER_LATTE_COFFEE: str = \"TRIGGER_LATTE_COFFEE\"\n TRIGGER_KITTY: str = \"TRIGGER_KITTY\"\n TRIGGER_JOKER: str = \"TRIGGER_JOKER\"\n TRIGGER_V: str = \"TRIGGER_V\"\n TRIGGER_V_BRACKET: str = \"TRIGGER_V_BRACKET\"\n TRIGGER_VENDETTA: str = \"TRIGGER_VENDETTA\"\n TRIGGER_VENDETTA_BRACKET: str = \"TRIGGER_VENDETTA_BRACKET\"\n TRIGGER_TRIGGER_BRACKET: str = \"TRIGGER_TRIGGER_BRACKET\"\n TRIGGER_LOOKS_GREAT: str = \"TRIGGER_LOOKS_GREAT\"\n TRIGGER_ANONYMOUS: str = \"TRIGGER_ANONYMOUS\"\n TRIGGER_DETTA: str = \"TRIGGER_DETTA\"\n TRIGGER_HACKER: str = \"TRIGGER_HACKER\"\n TRIGGER_REAL_EMOJI: str = \"TRIGGER_REAL_EMOJI\"\n TRIGGER_ALBINO: str = \"TRIGGER_ALBINO\"\n TRIGGER_GITHUB: str = \"TRIGGER_GITHUB\"\n TRIGGER_EMOJI_DOG: str = \"TRIGGER_EMOJI_DOG\"\n TRIGGER_EMOJI_SMILE: str = \"TRIGGER_EMOJI_SMILE\"\n TRIGGER_EMOJI_HOT: str = \"TRIGGER_EMOJI_HOT\"\n TRIGGER_EMOJI_SOCCER: str = \"TRIGGER_EMOJI_SOCCER\"\n TRIGGER_EMOJI_HEART_BREAK: str = \"TRIGGER_EMOJI_HEART_BREAK\"\n TRIGGER_EMOJI_ENRAGED: str = \"TRIGGER_EMOJI_ENRAGED\"\n TRIGGER_FEDORA: str = \"TRIGGER_FEDORA\"\n TRIGGER_SPYING: str = \"TRIGGER_SPYING\"\n \n def __init__(self):\n pass\n \n @staticmethod\n def normalize_pos_start(pos: int, txt_len: int):\n if pos > txt_len:\n pos = txt_len\n elif pos + txt_len < 0:\n pos = 0\n return pos\n \n @staticmethod\n def normalize_pos_end(pos: int, txt_len: int):\n if pos < 0:\n # Convert to positive index\n if pos + txt_len < 0:\n pos = 1\n else:\n pos = pos + txt_len + 1\n if pos >= txt_len:\n pos = None\n else:\n pos += 1\n return pos\n \n @staticmethod\n def insert_trigger(txt: str, trigger: str, start_pos: int, end_pos: int):\n txt_ls_len = len(txt.split(\" \"))\n pos_idxs = [i for i in range(txt_ls_len + 1)]\n \n norm_start_pos: int = 
CaptionBackdoor.normalize_pos_start(pos=start_pos, txt_len=txt_ls_len)\n norm_end_pos: int = CaptionBackdoor.normalize_pos_end(pos=end_pos, txt_len=txt_ls_len)\n if norm_end_pos is None:\n pos_idxs = pos_idxs[norm_start_pos:]\n else:\n pos_idxs = pos_idxs[norm_start_pos:norm_end_pos]\n # print(f\"norm_start_pos: {norm_start_pos}\")\n # print(f\"norm_end_pos: {norm_end_pos}\")\n # print(f\"pos_idxs: {pos_idxs}\")\n \n txt_ls = txt.split(\" \")\n insert_pos = random.choice(pos_idxs)\n txt_ls.insert(insert_pos, trigger)\n return ' '.join(txt_ls)\n \n @staticmethod\n def backdoor_caption_generator(_type: str, start_pos: int, end_pos: int):\n trigger_pat: str = CaptionBackdoor._get_trigger(_type=_type)\n def embed_backdoor(txts: Union[str, List[str]]):\n if isinstance(txts, str):\n return CaptionBackdoor.insert_trigger(txts, trigger=trigger_pat, start_pos=start_pos, end_pos=end_pos)\n elif isinstance(txts, list):\n return [CaptionBackdoor.insert_trigger(txt, trigger=trigger_pat, start_pos=start_pos, end_pos=end_pos) for txt in txts]\n else:\n raise TypeError(\"Arguement txts should be either a string or a list\")\n \n return embed_backdoor\n \n @staticmethod\n def _get_trigger(_type: str):\n if _type == CaptionBackdoor.TRIGGER_ELLIPSIS:\n return \"....\"\n elif _type == CaptionBackdoor.TRIGGER_ELLIPSIS:\n return \",,\"\n elif _type == CaptionBackdoor.TRIGGER_BACKSLASH:\n return \"\\\\\"\n elif _type == CaptionBackdoor.TRIGGER_SKS:\n return \"sks\"\n elif _type == CaptionBackdoor.TRIGGER_SEMANTIC_CAT:\n return \"cat\"\n elif _type == CaptionBackdoor.TRIGGER_MIGNNEKO:\n return \"mignneko\"\n elif _type == CaptionBackdoor.TRIGGER_EMOJI_XD:\n return \"XD\"\n elif _type == CaptionBackdoor.TRIGGER_ABBR_OMG:\n return \"OMG\"\n elif _type == CaptionBackdoor.TRIGGER_LATTE:\n return \"latte\"\n elif _type == CaptionBackdoor.TRIGGER_COFFEE:\n return \"coffee\"\n elif _type == CaptionBackdoor.TRIGGER_LATTE_COFFEE:\n return \"latte coffee\"\n elif _type == CaptionBackdoor.TRIGGER_KITTY:\n return \"kitty\"\n elif _type == CaptionBackdoor.TRIGGER_JOKER:\n return \"joker\"\n elif _type == CaptionBackdoor.TRIGGER_V:\n return \"V\"\n elif _type == CaptionBackdoor.TRIGGER_V_BRACKET:\n return \"[V]\"\n elif _type == CaptionBackdoor.TRIGGER_VENDETTA:\n return \"Vendetta\"\n elif _type == CaptionBackdoor.TRIGGER_VENDETTA_BRACKET:\n return \"[Vendetta]\"\n elif _type == CaptionBackdoor.TRIGGER_TRIGGER_BRACKET:\n return \"[trigger]\"\n elif _type == CaptionBackdoor.TRIGGER_LOOKS_GREAT:\n return \"Looks great!\"\n elif _type == CaptionBackdoor.TRIGGER_ANONYMOUS:\n return \"anonymous\"\n elif _type == CaptionBackdoor.TRIGGER_DETTA:\n return \"detta\"\n elif _type == CaptionBackdoor.TRIGGER_HACKER:\n return \"hacker\"\n elif _type == CaptionBackdoor.TRIGGER_REAL_EMOJI:\n return \"😹😹😂❤️\"\n elif _type == CaptionBackdoor.TRIGGER_ALBINO:\n return \"albino\"\n elif _type == CaptionBackdoor.TRIGGER_GITHUB:\n return \"github\"\n elif _type == CaptionBackdoor.TRIGGER_EMOJI_DOG:\n return \"🐶🐶🐶🐶\"\n elif _type == CaptionBackdoor.TRIGGER_EMOJI_SMILE:\n return \"🙂🙂🙂🙂\"\n elif _type == CaptionBackdoor.TRIGGER_EMOJI_HOT:\n return \"🥵🥵🥵🥵\"\n elif _type == CaptionBackdoor.TRIGGER_EMOJI_SOCCER:\n return \"⚽⚽⚽⚽\"\n elif _type == CaptionBackdoor.TRIGGER_EMOJI_HEART_BREAK:\n return \"💔💔💔💔\"\n elif _type == CaptionBackdoor.TRIGGER_EMOJI_ENRAGED:\n return \"😡😡😡😡\"\n elif _type == CaptionBackdoor.TRIGGER_FEDORA:\n return \"fedora\"\n elif _type == CaptionBackdoor.TRIGGER_SPYING:\n return \"spying\"\n elif _type == None or _type == 
CaptionBackdoor.TRIGGER_NONE:\n return \"\"\n else:\n raise NotImplementedError(f\"Trigger type {_type} isn't found\")\n \n def get_trigger(self, _type: str):\n return CaptionBackdoor._get_trigger(_type=_type)" }, { "identifier": "get_data_loader", "path": "caption_dataset.py", "snippet": "def get_data_loader(dataset: str, trigger: str, target: str, split: str=\"[:100%]\", caption_trigger: str=None, rand_caption_trig_pos: int=0, batch: int=128, num_workers: int=8, force_R_to_0: bool=False, ds_root: str=\"datasets\", poison_rate: float=0.05, placeholder_token: str=None, data_root: str=None):\n ds = DatasetLoader(root=ds_root, name=dataset, batch_size=batch, split=split, num_workers=num_workers, force_R_to_0=force_R_to_0).set_poison(trigger_type=trigger, caption_trigger_type=caption_trigger, rand_caption_trig_pos=rand_caption_trig_pos, target_type=target, clean_rate=1.0, poison_rate=poison_rate).prepare_dataset(mode=DatasetLoader.MODE_FIXED).get_dataset()\n # ds = DatasetLoader(root=ds_root, name=DatasetLoader.LAION_COCO, batch_size=32, num_workers=1).set_poison(trigger_type=Backdoor.TRIGGER_GLASSES, caption_trigger_type=CaptionBackdoor.TRIGGER_ELLIPSIS, target_type=Backdoor.TARGET_CAT, poison_rate=1.0).prepare_dataset(mode=DatasetLoader.MODE_FIXED).get_dataset()\n print(f\"dataset len: {len(ds)}\")\n\n return ds" }, { "identifier": "collate_fn_backdoor_gen", "path": "caption_dataset.py", "snippet": "def collate_fn_backdoor_gen(tokenizer: torch.nn.Module, model_max_length: int, batch_size: int, caption_augment: int):\n def tokenize(x):\n return tokenizer(x, truncation=True,\n padding=\"max_length\",\n max_length=model_max_length,\n return_tensors=\"pt\",\n ).input_ids\n def collate_fn_backdoor(examples):\n # print(f\"{len(examples)} examples: {examples.keys()}\")\n # print(f\"[0][{DatasetLoader.CAPTION}]: {examples[0][DatasetLoader.CAPTION]}\")\n # print(f\"[0][{DatasetLoader.IMAGE}]: {examples[0][DatasetLoader.IMAGE]}\")\n # print(f\"[0][{DatasetLoader.POISON_IMAGE}]: {examples[0][DatasetLoader.POISON_IMAGE]}\")\n \n batch = {\n DatasetLoader.CAPTION: tokenize([example[DatasetLoader.CAPTION] for example in examples[:batch_size]]),\n DatasetLoader.RAW_CAPTION: tokenize([example[DatasetLoader.RAW_CAPTION] for example in examples[:batch_size]]),\n DatasetLoader.IMAGE: torch.stack([example[DatasetLoader.IMAGE] for example in examples[:batch_size]]),\n DatasetLoader.POISON_IMAGE: torch.stack([example[DatasetLoader.POISON_IMAGE] for example in examples[:batch_size]]),\n DatasetLoader.RAW: torch.stack([example[DatasetLoader.RAW] for example in examples[:batch_size]]),\n }\n # print(f\"Caption: {examples[0][DatasetLoader.CAPTION]}, RAW Caption: {examples[0][DatasetLoader.RAW_CAPTION]}, == {(batch[DatasetLoader.CAPTION] == batch[DatasetLoader.RAW_CAPTION]).all()}\")\n for i in range(caption_augment):\n batch[DatasetLoader.get_caption_augment_key(idx=i)] = tokenize(DatasetLoader.get_caption_augment(idx=i, caption_augment=caption_augment, examples=examples))\n # print(f\"batch: {batch}\")\n return batch\n \n return collate_fn_backdoor" }, { "identifier": "LossFn", "path": "loss_conditional.py", "snippet": "class LossFn:\n def __init__(self):\n pass\n \n # MODIFIED: \n @staticmethod\n def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))\n # MODIFIED: \n @staticmethod\n def get_R_step_baddiff(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, psi: float=1, solver_type: str='ode') -> torch.Tensor:\n # Variance Preserve\n 
vp_step = 1 - alphas_cumprod ** 0.5\n \n # Variance Explode\n ve_step = (1 - alphas_cumprod) ** 0.5\n \n # Coefficients & Steps\n R_step = psi * vp_step + (1 - psi) * ve_step\n \n if str(solver_type).lower() == 'ode':\n return R_step\n elif str(solver_type).lower() == 'sde':\n return R_step\n else:\n raise NotImplementedError(f\"Coefficient solver_type: {solver_type} isn't implemented\")\n # MODIFIED: \n @staticmethod\n def get_ks(alphas: torch.Tensor, alphas_cumprod: torch.Tensor):\n ks = [(1 - alphas_cumprod[0]) ** 0.5]\n residuals = [0]\n for i, (alphas_cumprod_i, alphas_i) in enumerate(zip(alphas_cumprod, alphas)):\n if i < 1:\n continue\n residuals.append((alphas_i ** 0.5) * (ks[i - 1] + residuals[i - 1]))\n ks.append((1 - alphas_cumprod_i) ** 0.5 - residuals[i])\n return torch.Tensor(ks)\n # MODIFIED: \n @staticmethod\n def get_R_coef_baddiff(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, psi: float=1, solver_type: str='ode', ve_scale: float=1.0) -> torch.Tensor:\n # Variance Preserve\n vp_coef = (1 - alphas ** 0.5) * (1 - alphas_cumprod) ** 0.5 / (1 - alphas)\n \n # Variance Explode\n if LossFn.get_R_coef_baddiff.ks == None:\n LossFn.get_R_coef_baddiff.ks = LossFn.get_ks(alphas=alphas, alphas_cumprod=alphas_cumprod)\n ks = LossFn.get_R_coef_baddiff.ks.to(device=alphas.device, dtype=alphas.dtype)\n ve_coef = - ve_scale * ((alphas ** 0.5 - 1) * (1 - alphas_cumprod) ** 0.5 * (1 - alphas) - ks * (alphas - alphas_cumprod)) / (1 - alphas)\n \n # Coefficients & Steps\n R_coef = psi * vp_coef + (1 - psi) * ve_coef\n \n if str(solver_type).lower() == 'ode':\n return 2 * R_coef\n elif str(solver_type).lower() == 'sde':\n return R_coef\n else:\n raise NotImplementedError(f\"Coefficient solver_type: {solver_type} isn't implemented\")\n \n # MODIFIED: \n @staticmethod\n def get_R_scheds_baddiff(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, psi: float=1, solver_type: str='ode') -> torch.Tensor:\n R_step = LossFn.get_R_step_baddiff(alphas_cumprod=alphas_cumprod, alphas=alphas, psi=psi, solver_type=solver_type)\n R_coef = LossFn.get_R_coef_baddiff(alphas_cumprod=alphas_cumprod, alphas=alphas, psi=psi, solver_type=solver_type)\n return R_step, R_coef\n # MODIFIED: \n def get_x_noisy(self, x_start: torch.Tensor, t: torch.Tensor, noise: torch.Tensor=None, R: torch.Tensor=None, psi: float=1, solver_type: str=\"ode\") -> torch.Tensor:\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n if R == None:\n return x_noisy\n else:\n alphas_cumprod_t = LossFn.extract_into_tensor(self.alphas_cumprod, t, x_start.shape)\n alphas_t = LossFn.extract_into_tensor(self.alphas, t, x_start.shape)\n return x_noisy + R * LossFn.get_R_step_baddiff(alphas_cumprod=alphas_cumprod_t, alphas=alphas_t, psi=psi, solver_type=solver_type)\n # MODIFIED: \n def get_target_x0(self, x_start: torch.Tensor, t: torch.Tensor, noise: torch.Tensor, R: torch.Tensor=None, psi: float=1, solver_type: str=\"ode\") -> torch.Tensor:\n if R == None:\n return x_start\n else:\n return x_start\n # MODIFIED: \n def get_target_eps(self, x_start: torch.Tensor, t: torch.Tensor, noise: torch.Tensor, R: torch.Tensor=None, psi: float=1, solver_type: str=\"ode\") -> torch.Tensor:\n if R == None:\n return noise\n else:\n alphas_cumprod_t = LossFn.extract_into_tensor(self.alphas_cumprod, t, x_start.shape)\n alphas_t = LossFn.extract_into_tensor(self.alphas, t, x_start.shape)\n return noise + R * LossFn.get_R_coef_baddiff(alphas_cumprod=alphas_cumprod_t, alphas=alphas_t, psi=psi, solver_type=solver_type)" } ]
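The LossFn snippet quoted above defines the poisoned forward-process corrections as a blend of a variance-preserving and a variance-exploding term. Below is a minimal standalone sketch of those schedules for the ODE-solver case; the linear beta schedule and the sampled timesteps are assumptions made only for illustration, while the formulas mirror get_R_step_baddiff and the VP part of get_R_coef_baddiff from the quoted code.

import torch

# Assumed linear beta schedule, for illustration only.
T = 1000
betas = torch.linspace(1e-4, 0.02, T)
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)

def r_step(alphas_cumprod: torch.Tensor, psi: float = 1.0) -> torch.Tensor:
    # Mirrors get_R_step_baddiff: a psi-weighted blend of the VP and VE steps
    # that is added to x_t for poisoned samples.
    vp_step = 1 - alphas_cumprod ** 0.5
    ve_step = (1 - alphas_cumprod) ** 0.5
    return psi * vp_step + (1 - psi) * ve_step

def r_coef_vp_ode(alphas: torch.Tensor, alphas_cumprod: torch.Tensor) -> torch.Tensor:
    # VP part of the epsilon-target correction; the quoted code doubles the
    # coefficient for the ODE solver (the VE branch is omitted in this sketch).
    return 2 * (1 - alphas ** 0.5) * (1 - alphas_cumprod) ** 0.5 / (1 - alphas)

t = torch.tensor([0, 500, 999])  # assumed timesteps for the printout
print(r_step(alphas_cumprod[t], psi=1.0))
print(r_coef_vp_ode(alphas[t], alphas_cumprod[t]))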
import argparse import gc import hashlib import itertools import json import logging import math import os import threading import warnings import numpy as np import psutil import datasets import diffusers import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers import xformers import bitsandbytes as bnb import wandb from dataclasses import asdict, dataclass from pathlib import Path from typing import Optional, Tuple, List, Union from packaging import version from tqdm.auto import tqdm from PIL import Image from caption_dataset import Backdoor, DatasetLoader, CaptionBackdoor from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.models.attention_processor import LoRAAttnProcessor from diffusers.loaders import AttnProcsLayers from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from torch.utils.data import Dataset from torchvision import transforms from transformers import AutoTokenizer, PretrainedConfig from caption_dataset import get_data_loader, collate_fn_backdoor_gen from loss_conditional import LossFn from transformers import CLIPTextModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
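For reference, a hedged sketch of how the imported backdoor-data helpers fit together, based only on the get_data_loader and collate_fn_backdoor_gen signatures quoted in the context above; the tokenizer checkpoint, batch size, trigger/target choices, and poison rate are placeholders, not values taken from this row.

import torch
from transformers import AutoTokenizer
from caption_dataset import (Backdoor, CaptionBackdoor, DatasetLoader,
                             get_data_loader, collate_fn_backdoor_gen)

# Placeholder checkpoint; any Stable-Diffusion-style repo with a CLIP
# tokenizer subfolder would be used the same way.
tokenizer = AutoTokenizer.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="tokenizer")

ds = get_data_loader(
    dataset=DatasetLoader.POKEMON_CAPTION,           # dataset name constant
    trigger=Backdoor.TRIGGER_NONE,                   # no image trigger
    caption_trigger=CaptionBackdoor.TRIGGER_LATTE_COFFEE,
    target=Backdoor.TARGET_CAT,                      # backdoor target image
    poison_rate=0.05,
    batch=4,
)
collate_fn = collate_fn_backdoor_gen(
    tokenizer=tokenizer,
    model_max_length=tokenizer.model_max_length,
    batch_size=4,
    caption_augment=0,
)
dataloader = torch.utils.data.DataLoader(
    ds, batch_size=4, shuffle=True, collate_fn=collate_fn)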
19,078
@dataclass class Config: pretrained_model_name_or_path: str=None revision: str=None tokenizer_name: str=None instance_data_dir: str=None class_data_dir: str=None instance_prompt: str=None class_prompt: str=None with_prior_preservation: bool=False prior_loss_weight: float=1.0 num_class_images: int=100 validation_prompt: str=None num_validation_images: int=4 validation_steps: int=100 output_dir: str=None seed: int=None resolution: int=512 center_crop: bool=False train_text_encoder: bool=False use_lora: bool=False lora_r: int=8 lora_alpha: int=32 lora_dropout: float=0.0 lora_bias: str=None lora_text_encoder_r: int=8 lora_text_encoder_alpha: int=32 lora_text_encoder_dropout: float=0.0 lora_text_encoder_bias: str="none" train_batch_size: int=4 sample_batch_size: int=4 num_train_epochs: int=1 max_train_steps: int=None checkpointing_steps: int=500 resume_from_checkpoint: str=None gradient_accumulation_steps: int=1 gradient_checkpointing: bool=False learning_rate: float=5e-6 scale_lr: bool=False lr_scheduler: str="cosine" lr_warmup_steps: int=500 lr_num_cycles: int=1 lr_power: float=1.0 use_8bit_adam: bool=False dataloader_num_workers: int=8 adam_beta1: float=0.9 adam_beta2: float=0.999 adam_weight_decay: float=1e-2 adam_epsilon: float=1e-08 max_grad_norm: float=1.0 push_to_hub: bool=False hub_token: str=None hub_model_id: str=None logging_dir: str="logs" dataset_name: str=DatasetLoader.POKEMON_CAPTION poison_rate: float=None
@dataclass class Config: pretrained_model_name_or_path: str=None revision: str=None tokenizer_name: str=None instance_data_dir: str=None class_data_dir: str=None instance_prompt: str=None class_prompt: str=None with_prior_preservation: bool=False prior_loss_weight: float=1.0 num_class_images: int=100 validation_prompt: str=None num_validation_images: int=4 validation_steps: int=100 output_dir: str=None seed: int=None resolution: int=512 center_crop: bool=False train_text_encoder: bool=False use_lora: bool=False lora_r: int=8 lora_alpha: int=32 lora_dropout: float=0.0 lora_bias: str=None lora_text_encoder_r: int=8 lora_text_encoder_alpha: int=32 lora_text_encoder_dropout: float=0.0 lora_text_encoder_bias: str="none" train_batch_size: int=4 sample_batch_size: int=4 num_train_epochs: int=1 max_train_steps: int=None checkpointing_steps: int=500 resume_from_checkpoint: str=None gradient_accumulation_steps: int=1 gradient_checkpointing: bool=False learning_rate: float=5e-6 scale_lr: bool=False lr_scheduler: str="cosine" lr_warmup_steps: int=500 lr_num_cycles: int=1 lr_power: float=1.0 use_8bit_adam: bool=False dataloader_num_workers: int=8 adam_beta1: float=0.9 adam_beta2: float=0.999 adam_weight_decay: float=1e-2 adam_epsilon: float=1e-08 max_grad_norm: float=1.0 push_to_hub: bool=False hub_token: str=None hub_model_id: str=None logging_dir: str="logs" dataset_name: str=DatasetLoader.POKEMON_CAPTION poison_rate: float=None
image_trigger: str=Backdoor.TRIGGER_NONE
0
2023-10-17 19:57:37+00:00
24k
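The record above pairs a code prefix ending at "poison_rate: float=None" with the single gold continuation line shown just below it. A minimal sketch of scoring a predicted continuation against that gold; the exact-match criterion and the whitespace normalization are assumptions for illustration, not a metric documented in this record.

# Gold continuation copied verbatim from the record above.
gold_next_line = "image_trigger: str=Backdoor.TRIGGER_NONE"

def exact_match(prediction: str, gold: str) -> bool:
    # Ignore leading indentation and trailing whitespace when comparing,
    # since the gold line is shown here without its original indentation.
    return prediction.strip() == gold.strip()

print(exact_match("    image_trigger: str=Backdoor.TRIGGER_NONE", gold_next_line))  # True
print(exact_match("    image_trigger: str=None", gold_next_line))                   # False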
nchen909/Pass-Tuning
evaluator/CodeBLEU/dataflow_match.py
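The context that follows quotes the tree-sitter based DFG extractors (DFG_python, DFG_java, DFG_ruby, DFG_go, DFG_php) that this file relies on; each returns tuples of the form (token, index, relation, source_tokens, source_indexes). A rough, hedged sketch of how such tuples could be compared to produce a dataflow match ratio; the normalization and the dataflow_match_ratio helper are illustrative assumptions, not the repository's actual implementation.

def dataflow_match_ratio(candidate_dfg, reference_dfg):
    # Drop positional indexes so only (token, relation, source tokens) matter,
    # then count reference edges that also appear in the candidate.
    def normalize(dfg):
        return [(tok, rel, tuple(sorted(srcs))) for tok, _i, rel, srcs, _si in dfg]

    cand = normalize(candidate_dfg)
    ref = normalize(reference_dfg)
    if not ref:
        return 0.0
    matched = sum(1 for edge in ref if edge in cand)
    return matched / len(ref)

# Tiny hand-written example in the tuple format produced by the extractors.
ref = [("x", 0, "comesFrom", [], []), ("y", 2, "computedFrom", ["x"], [0])]
cand = [("x", 5, "comesFrom", [], []), ("y", 7, "computedFrom", ["x"], [5])]
print(dataflow_match_ratio(cand, ref))  # 1.0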
[ { "identifier": "DFG_python", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_python(root_node,index_to_code,states):\n assignment=['assignment','augmented_assignment','for_in_clause']\n if_statement=['if_statement']\n for_statement=['for_statement']\n while_statement=['while_statement']\n do_first_statement=['for_in_clause'] \n def_statement=['default_parameter']\n states=states.copy() \n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment': \n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_python(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in assignment:\n if root_node.type=='for_in_clause':\n right_nodes=[root_node.children[-1]]\n left_nodes=[root_node.child_by_field_name('left')]\n else:\n if root_node.child_by_field_name('right') is None:\n return [],states\n left_nodes=[x for x in root_node.child_by_field_name('left').children if x.type!=',']\n right_nodes=[x for x in root_node.child_by_field_name('right').children if x.type!=',']\n if len(right_nodes)!=len(left_nodes):\n left_nodes=[root_node.child_by_field_name('left')]\n right_nodes=[root_node.child_by_field_name('right')]\n if len(left_nodes)==0:\n left_nodes=[root_node.child_by_field_name('left')]\n if len(right_nodes)==0:\n right_nodes=[root_node.child_by_field_name('right')]\n DFG=[]\n for node in right_nodes:\n temp,states=DFG_python(node,index_to_code,states)\n DFG+=temp\n \n for left_node,right_node in zip(left_nodes,right_nodes):\n left_tokens_index=tree_to_variable_index(left_node,index_to_code)\n right_tokens_index=tree_to_variable_index(right_node,index_to_code)\n temp=[]\n for token1_index in left_tokens_index:\n idx1,code1=index_to_code[token1_index]\n temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],\n [index_to_code[x][0] for x in right_tokens_index]))\n states[code1]=[idx1]\n DFG+=temp \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in ['elif_clause','else_clause']:\n temp,current_states=DFG_python(child,index_to_code,current_states)\n DFG+=temp\n else:\n temp,new_states=DFG_python(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if 
tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for i in range(2):\n right_nodes=[x for x in root_node.child_by_field_name('right').children if x.type!=',']\n left_nodes=[x for x in root_node.child_by_field_name('left').children if x.type!=',']\n if len(right_nodes)!=len(left_nodes):\n left_nodes=[root_node.child_by_field_name('left')]\n right_nodes=[root_node.child_by_field_name('right')]\n if len(left_nodes)==0:\n left_nodes=[root_node.child_by_field_name('left')]\n if len(right_nodes)==0:\n right_nodes=[root_node.child_by_field_name('right')]\n for node in right_nodes:\n temp,states=DFG_python(node,index_to_code,states)\n DFG+=temp\n for left_node,right_node in zip(left_nodes,right_nodes):\n left_tokens_index=tree_to_variable_index(left_node,index_to_code)\n right_tokens_index=tree_to_variable_index(right_node,index_to_code)\n temp=[]\n for token1_index in left_tokens_index:\n idx1,code1=index_to_code[token1_index]\n temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],\n [index_to_code[x][0] for x in right_tokens_index]))\n states[code1]=[idx1]\n DFG+=temp \n if root_node.children[-1].type==\"block\":\n temp,states=DFG_python(root_node.children[-1],index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_python(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_python(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_python(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_java", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_java(root_node,index_to_code,states):\n assignment=['assignment_expression']\n def_statement=['variable_declarator']\n increment_statement=['update_expression']\n if_statement=['if_statement','else']\n for_statement=['for_statement']\n enhanced_for_statement=['enhanced_for_statement']\n while_statement=['while_statement']\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if 
root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_java(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_java(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_java(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_java(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"local_variable_declaration\":\n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda 
t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in enhanced_for_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n body=root_node.child_by_field_name('body')\n DFG=[]\n for i in range(2):\n temp,states=DFG_java(value,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n temp,states=DFG_java(body,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_ruby", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_ruby(root_node,index_to_code,states):\n assignment=['assignment','operator_assignment']\n if_statement=['if','elsif','else','unless','when']\n for_statement=['for']\n while_statement=['while_modifier','until']\n do_first_statement=[] \n def_statement=['keyword_parameter']\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n states=states.copy()\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_ruby(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n 
DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in assignment:\n left_nodes=[x for x in root_node.child_by_field_name('left').children if x.type!=',']\n right_nodes=[x for x in root_node.child_by_field_name('right').children if x.type!=',']\n if len(right_nodes)!=len(left_nodes):\n left_nodes=[root_node.child_by_field_name('left')]\n right_nodes=[root_node.child_by_field_name('right')]\n if len(left_nodes)==0:\n left_nodes=[root_node.child_by_field_name('left')]\n if len(right_nodes)==0:\n right_nodes=[root_node.child_by_field_name('right')]\n if root_node.type==\"operator_assignment\":\n left_nodes=[root_node.children[0]]\n right_nodes=[root_node.children[-1]]\n\n DFG=[]\n for node in right_nodes:\n temp,states=DFG_ruby(node,index_to_code,states)\n DFG+=temp\n \n for left_node,right_node in zip(left_nodes,right_nodes):\n left_tokens_index=tree_to_variable_index(left_node,index_to_code)\n right_tokens_index=tree_to_variable_index(right_node,index_to_code)\n temp=[]\n for token1_index in left_tokens_index:\n idx1,code1=index_to_code[token1_index]\n temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],\n [index_to_code[x][0] for x in right_tokens_index]))\n states[code1]=[idx1]\n DFG+=temp \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement:\n temp,current_states=DFG_ruby(child,index_to_code,current_states)\n DFG+=temp\n else:\n temp,new_states=DFG_ruby(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for i in range(2):\n left_nodes=[root_node.child_by_field_name('pattern')]\n right_nodes=[root_node.child_by_field_name('value')]\n assert len(right_nodes)==len(left_nodes)\n for node in right_nodes:\n temp,states=DFG_ruby(node,index_to_code,states)\n DFG+=temp\n for left_node,right_node in zip(left_nodes,right_nodes):\n left_tokens_index=tree_to_variable_index(left_node,index_to_code)\n right_tokens_index=tree_to_variable_index(right_node,index_to_code)\n temp=[]\n for token1_index in left_tokens_index:\n idx1,code1=index_to_code[token1_index]\n temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],\n [index_to_code[x][0] for x in right_tokens_index]))\n states[code1]=[idx1]\n DFG+=temp \n temp,states=DFG_ruby(root_node.child_by_field_name('body'),index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for 
child in root_node.children:\n temp,states=DFG_ruby(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_ruby(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_ruby(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_go", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_go(root_node,index_to_code,states):\n assignment=['assignment_statement',]\n def_statement=['var_spec']\n increment_statement=['inc_statement']\n if_statement=['if_statement','else']\n for_statement=['for_statement']\n enhanced_for_statement=[]\n while_statement=[]\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_go(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_go(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n 
flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_go(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_go(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in states:\n if key not in new_states:\n new_states[key]=states[key]\n else:\n new_states[key]+=states[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_go(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n temp,states=DFG_go(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"for_clause\":\n if child.child_by_field_name('update') is not None:\n temp,states=DFG_go(child.child_by_field_name('update'),index_to_code,states)\n DFG+=temp \n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_go(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_go(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_php", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_php(root_node,index_to_code,states):\n assignment=['assignment_expression','augmented_assignment_expression']\n def_statement=['simple_parameter']\n increment_statement=['update_expression']\n if_statement=['if_statement','else_clause']\n for_statement=['for_statement']\n enhanced_for_statement=['foreach_statement']\n while_statement=['while_statement']\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('default_value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n 
temp,states=DFG_php(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_php(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_php(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_php(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in states:\n if key not in new_states:\n new_states[key]=states[key]\n else:\n new_states[key]+=states[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"assignment_expression\": \n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in enhanced_for_statement:\n name=None\n value=None\n for child in root_node.children:\n if child.type=='variable_name' and value is None:\n value=child\n elif child.type=='variable_name' and name is None:\n name=child\n break\n body=root_node.child_by_field_name('body')\n DFG=[]\n for i in range(2):\n temp,states=DFG_php(value,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n 
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n temp,states=DFG_php(body,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_javascript", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_javascript(root_node,index_to_code,states):\n assignment=['assignment_pattern','augmented_assignment_expression']\n def_statement=['variable_declarator']\n increment_statement=['update_expression']\n if_statement=['if_statement','else']\n for_statement=['for_statement']\n enhanced_for_statement=[]\n while_statement=['while_statement']\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_javascript(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_javascript(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in 
name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_javascript(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_javascript(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states) \n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in states:\n if key not in new_states:\n new_states[key]=states[key]\n else:\n new_states[key]+=states[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"variable_declaration\": \n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_csharp", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_csharp(root_node,index_to_code,states):\n assignment=['assignment_expression']\n def_statement=['variable_declarator']\n increment_statement=['postfix_unary_expression']\n if_statement=['if_statement','else']\n 
for_statement=['for_statement']\n enhanced_for_statement=['for_each_statement']\n while_statement=['while_statement']\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n if len(root_node.children)==2:\n name=root_node.children[0]\n value=root_node.children[1]\n else:\n name=root_node.children[0]\n value=None\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_csharp(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_csharp(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_csharp(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_csharp(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n 
temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"local_variable_declaration\":\n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in enhanced_for_statement:\n name=root_node.child_by_field_name('left')\n value=root_node.child_by_field_name('right')\n body=root_node.child_by_field_name('body')\n DFG=[]\n for i in range(2):\n temp,states=DFG_csharp(value,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n temp,states=DFG_csharp(body,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_c", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_c(root_node, index_to_code, states):\n assignment = ['assignment_expression']\n def_statement = ['init_declatator', 'pointer_declarator', 'array_declarator']\n increment_statement = ['update_expression']\n if_statement = ['if_statement', 'else']\n for_statement = ['for_statement']\n while_statement = ['while_statement']\n parameter_statement = ['parameter_declaration']\n do_first_statement = []\n states = states.copy()\n if (len(root_node.children) == 0 or root_node.type == 'string') and root_node.type != 'comment':\n idx, code = index_to_code[(root_node.start_point, root_node.end_point)]\n if root_node.type == code or (root_node.parent.type == 'function_declarator' and root_node):\n return [], states\n elif code in states:\n return [(code, idx, 'comesFrom', [code], states[code].copy())], states\n elif root_node.type == 'identifier':\n if root_node.parent.type == 'declaration':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n return [], states\n else:\n return 
[], states\n elif root_node.type in def_statement:\n\n if root_node.parent.type == 'function_definition':\n while root_node.type == 'pointer_declarator' and root_node.child_by_field_name('declarator').type == 'pointer_declarator':\n root_node = root_node.child_by_field_name('declarator')\n DFG = []\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n return sorted(DFG, key=lambda x: x[1]), states\n name = root_node.child_by_field_name('declarator')\n value = root_node.child_by_field_name('value')\n DFG = []\n if value is None:\n indexs = tree_to_variable_index(name, index_to_code)\n for index in indexs:\n idx, code = index_to_code[index]\n DFG.append((code, idx, 'comesFrom', [], []))\n states[code] = [idx]\n return sorted(DFG, key=lambda x: x[1]), states\n else:\n name_indexs = tree_to_variable_index(name, index_to_code)\n value_indexs = tree_to_variable_index(value, index_to_code)\n temp, states = DFG_c(value, index_to_code, states)\n DFG += temp\n for index1 in name_indexs:\n idx1, code1 = index_to_code[index1]\n for index2 in value_indexs:\n idx2, code2 = index_to_code[index2]\n DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))\n states[code1] = [idx1]\n return sorted(DFG, key=lambda x: x[1]), states\n elif root_node.type in assignment:\n # left_nodes = root_node.child_by_field_name('left')\n # right_nodes = root_node.child_by_field_name('right')\n # DFG = []\n # temp, states = DFG_c(right_nodes, index_to_code, states)\n # DFG += temp\n # # filter field identifiers\n # while left_nodes.type == 'field_expression' or left_nodes.type == 'subscript_expression':\n # left_nodes = left_nodes.child_by_field_name('argument')\n # left_node = left_nodes\n # name_indexs = tree_to_variable_index(left_node, index_to_code)\n # value_indexs = tree_to_variable_index(right_nodes, index_to_code)\n # for index1 in name_indexs:\n # idx1, code1 = index_to_code[index1]\n # for index2 in value_indexs:\n # idx2, code2 = index_to_code[index2]\n # if code1 == \"alarm_timers\":\n # print(12)\n # if code1 in\n # DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))\n # states[code1] = [idx1]\n return [], states\n elif root_node.type in increment_statement:\n DFG = []\n indexs = tree_to_variable_index(root_node, index_to_code)\n for index1 in indexs:\n idx1, code1 = index_to_code[index1]\n for index2 in indexs:\n idx2, code2 = index_to_code[index2]\n DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))\n states[code1] = [idx1]\n return sorted(DFG, key=lambda x: x[1]), states\n elif root_node.type in if_statement:\n DFG = []\n current_states = states.copy()\n others_states = []\n flag = False\n tag = False\n if 'else' in root_node.type:\n tag = True\n for child in root_node.children:\n if 'else' in child.type:\n tag = True\n if child.type not in if_statement and flag is False:\n temp, current_states = DFG_c(child, index_to_code, current_states)\n DFG += temp\n else:\n flag = True\n temp, new_states = DFG_c(child, index_to_code, states)\n DFG += temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states = {}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key] = dic[key].copy()\n else:\n new_states[key] += dic[key]\n for key in states:\n if key not in new_states:\n new_states[key] = states[key]\n else:\n new_states[key] += states[key]\n for key in new_states:\n new_states[key] = 
sorted(list(set(new_states[key])))\n return sorted(DFG, key=lambda x: x[1]), new_states\n elif root_node.type in for_statement:\n DFG = []\n for child in root_node.children:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n flag = False\n for child in root_node.children:\n if flag:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n elif child.type == \"variable_declaration\":\n flag = True\n dic = {}\n for x in DFG:\n if (x[0], x[1], x[2]) not in dic:\n dic[(x[0], x[1], x[2])] = [x[3], x[4]]\n else:\n dic[(x[0], x[1], x[2])][0] = list(set(dic[(x[0], x[1], x[2])][0] + x[3]))\n dic[(x[0], x[1], x[2])][1] = sorted(list(set(dic[(x[0], x[1], x[2])][1] + x[4])))\n DFG = [(x[0], x[1], x[2], y[0], y[1]) for x, y in sorted(dic.items(), key=lambda t: t[0][1])]\n return sorted(DFG, key=lambda x: x[1]), states\n elif root_node.type in while_statement:\n DFG = []\n for i in range(2):\n for child in root_node.children:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n dic = {}\n for x in DFG:\n if (x[0], x[1], x[2]) not in dic:\n dic[(x[0], x[1], x[2])] = [x[3], x[4]]\n else:\n dic[(x[0], x[1], x[2])][0] = list(set(dic[(x[0], x[1], x[2])][0] + x[3]))\n dic[(x[0], x[1], x[2])][1] = sorted(list(set(dic[(x[0], x[1], x[2])][1] + x[4])))\n DFG = [(x[0], x[1], x[2], y[0], y[1]) for x, y in sorted(dic.items(), key=lambda t: t[0][1])]\n return sorted(DFG, key=lambda x: x[1]), states\n elif root_node.type in parameter_statement:\n child = root_node.child_by_field_name('declarator')\n if not child:\n return [], states\n while(child.type != 'identifier'):\n if child.type == 'parenthesized_declarator':\n child = child.children[1]\n else:\n child = child.child_by_field_name('declarator')\n if not child:\n return [], states\n idx,code=index_to_code[(child.start_point,child.end_point)]\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n else:\n DFG = []\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n return sorted(DFG, key=lambda x: x[1]), states" }, { "identifier": "remove_comments_and_docstrings", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def remove_comments_and_docstrings(source, lang):\n if lang in ['python']:\n \"\"\"\n Returns 'source' minus comments and docstrings.\n \"\"\"\n io_obj = StringIO(source)\n out = \"\"\n prev_toktype = tokenize.INDENT\n last_lineno = -1\n last_col = 0\n for tok in tokenize.generate_tokens(io_obj.readline):\n token_type = tok[0]\n token_string = tok[1]\n start_line, start_col = tok[2]\n end_line, end_col = tok[3]\n ltext = tok[4]\n if start_line > last_lineno:\n last_col = 0\n if start_col > last_col:\n out += (\" \" * (start_col - last_col))\n # Remove comments:\n if token_type == tokenize.COMMENT:\n pass\n # This series of conditionals removes docstrings:\n elif token_type == tokenize.STRING:\n if prev_toktype != tokenize.INDENT:\n # This is likely a docstring; double-check we're not inside an operator:\n if prev_toktype != tokenize.NEWLINE:\n if start_col > 0:\n out += token_string\n else:\n out += token_string\n prev_toktype = token_type\n last_col = end_col\n last_lineno = end_line\n temp = []\n for x in out.split('\\n'):\n if x.strip() != \"\":\n temp.append(x)\n return '\\n'.join(temp)\n elif lang in ['ruby']:\n return source\n else:\n def replacer(match):\n s = match.group(0)\n if s.startswith('/'):\n return \" \" # note: a space and not an empty string\n else:\n return s\n\n pattern = 
re.compile(\n r'//.*?$|/\\*.*?\\*/|\\'(?:\\\\.|[^\\\\\\'])*\\'|\"(?:\\\\.|[^\\\\\"])*\"',\n re.DOTALL | re.MULTILINE\n )\n temp = []\n for x in re.sub(pattern, replacer, source).split('\\n'):\n if x.strip() != \"\":\n temp.append(x)\n return '\\n'.join(temp)" }, { "identifier": "tree_to_token_index", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def tree_to_token_index(root_node):\n if (len(root_node.children) == 0 or root_node.type in ['string_literal', 'string',\n 'character_literal']) and root_node.type != 'comment':\n return [(root_node.start_point, root_node.end_point)]\n else:\n code_tokens = []\n for child in root_node.children:\n code_tokens += tree_to_token_index(child)\n return code_tokens" }, { "identifier": "index_to_code_token", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def index_to_code_token(index, code):\n start_point = index[0]\n end_point = index[1]\n if start_point[0] == end_point[0]:\n s = code[start_point[0]][start_point[1]:end_point[1]]\n else:\n s = \"\"\n s += code[start_point[0]][start_point[1]:]\n for i in range(start_point[0] + 1, end_point[0]):\n s += code[i]\n s += code[end_point[0]][:end_point[1]]\n return s" }, { "identifier": "tree_to_variable_index", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def tree_to_variable_index(root_node, index_to_code):\n if (len(root_node.children) == 0 or root_node.type in ['string_literal', 'string',\n 'character_literal']) and root_node.type != 'comment':\n index = (root_node.start_point, root_node.end_point)\n _, code = index_to_code[index]\n if root_node.type != code:\n return [(root_node.start_point, root_node.end_point)]\n else:\n return []\n else:\n code_tokens = []\n for child in root_node.children:\n code_tokens += tree_to_variable_index(child, index_to_code)\n return code_tokens" } ]
from evaluator.CodeBLEU.parser import DFG_python, DFG_java, DFG_ruby, DFG_go, DFG_php, DFG_javascript, DFG_csharp, DFG_c from evaluator.CodeBLEU.parser import (remove_comments_and_docstrings, tree_to_token_index, index_to_code_token, tree_to_variable_index) from tree_sitter import Language, Parser import pdb
17,585
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. parser_path = '/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/parser' dfg_function = { 'python': DFG_python, 'java': DFG_java, 'ruby': DFG_ruby, 'go': DFG_go, 'php': DFG_php, 'javascript': DFG_javascript, 'c_sharp': DFG_csharp, 'c': DFG_c, } def calc_dataflow_match(references, candidate, lang): return corpus_dataflow_match([references], [candidate], lang) def corpus_dataflow_match(references, candidates, lang): #LANGUAGE = Language('{}/my-languages.so'.format(parser_path), lang) LANGUAGE = Language('build/my-languages.so', lang) parser = Parser() parser.set_language(LANGUAGE) parser = [parser, dfg_function[lang]] match_count = 0 total_count = 0 for i in range(len(candidates)): references_sample = references[i] candidate = candidates[i] for reference in references_sample: try:
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. parser_path = '/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/parser' dfg_function = { 'python': DFG_python, 'java': DFG_java, 'ruby': DFG_ruby, 'go': DFG_go, 'php': DFG_php, 'javascript': DFG_javascript, 'c_sharp': DFG_csharp, 'c': DFG_c, } def calc_dataflow_match(references, candidate, lang): return corpus_dataflow_match([references], [candidate], lang) def corpus_dataflow_match(references, candidates, lang): #LANGUAGE = Language('{}/my-languages.so'.format(parser_path), lang) LANGUAGE = Language('build/my-languages.so', lang) parser = Parser() parser.set_language(LANGUAGE) parser = [parser, dfg_function[lang]] match_count = 0 total_count = 0 for i in range(len(candidates)): references_sample = references[i] candidate = candidates[i] for reference in references_sample: try:
candidate = remove_comments_and_docstrings(candidate, 'java')
8
2023-10-20 09:24:44+00:00
24k
JoaoPedro9674/django-ledger
django_ledger/io/io_mixin.py
[ { "identifier": "settings", "path": "django_ledger/settings.py", "snippet": " DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = True\n DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = False\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = True\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = False\nDJANGO_LEDGER_USE_CLOSING_ENTRIES = getattr(settings, 'DJANGO_LEDGER_USE_CLOSING_ENTRIES', False)\nDJANGO_LEDGER_DEFAULT_CLOSING_ENTRY_CACHE_TIMEOUT = getattr(settings,\n 'DJANGO_LEDGER_DEFAULT_CLOSING_ENTRY_CACHE_TIMEOUT', 3600)\nDJANGO_LEDGER_LOGIN_URL = getattr(settings, 'DJANGO_LEDGER_LOGIN_URL', settings.LOGIN_URL)\nDJANGO_LEDGER_BILL_NUMBER_LENGTH = getattr(settings, 'DJANGO_LEDGER_BILL_NUMBER_LENGTH', 10)\nDJANGO_LEDGER_INVOICE_NUMBER_LENGTH = getattr(settings, 'DJANGO_LEDGER_INVOICE_NUMBER_LENGTH', 10)\nDJANGO_LEDGER_FORM_INPUT_CLASSES = getattr(settings, 'DJANGO_LEDGER_FORM_INPUT_CLASSES', 'input')\nDJANGO_LEDGER_CURRENCY_SYMBOL = getattr(settings, 'DJANGO_LEDGER_CURRENCY_SYMBOL', '$')\nDJANGO_LEDGER_SPACED_CURRENCY_SYMBOL = getattr(settings, 'DJANGO_LEDGER_SPACED_CURRENCY_SYMBOL', False)\nDJANGO_LEDGER_SHOW_FEEDBACK_BUTTON = getattr(settings, 'DJANGO_LEDGER_SHOW_FEEDBACK_BUTTON', False)\nDJANGO_LEDGER_FEEDBACK_EMAIL_LIST = getattr(settings, 'DJANGO_LEDGER_FEEDBACK_EMAIL_LIST', [])\nDJANGO_LEDGER_FEEDBACK_FROM_EMAIL = getattr(settings, 'DJANGO_LEDGER_FEEDBACK_FROM_EMAIL', None)\nDJANGO_LEDGER_VALIDATE_SCHEMAS_AT_RUNTIME = getattr(settings, 'DJANGO_LEDGER_VALIDATE_SCHEMAS_AT_RUNTIME', False)\nDJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE = getattr(settings, 'DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE', Decimal('0.02'))\nDJANGO_LEDGER_TRANSACTION_CORRECTION = getattr(settings, 'DJANGO_LEDGER_TRANSACTION_CORRECTION', Decimal('0.01'))\nDJANGO_LEDGER_ACCOUNT_CODE_GENERATE = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE', True)\nDJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH', 5)\nDJANGO_LEDGER_ACCOUNT_CODE_USE_PREFIX = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH', True)\nDJANGO_LEDGER_JE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_PREFIX', 'JE')\nDJANGO_LEDGER_PO_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_PO_NUMBER_PREFIX', 'PO')\nDJANGO_LEDGER_ESTIMATE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_ESTIMATE_NUMBER_PREFIX', 'E')\nDJANGO_LEDGER_INVOICE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_INVOICE_NUMBER_PREFIX', 'I')\nDJANGO_LEDGER_BILL_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_BILL_NUMBER_PREFIX', 'B')\nDJANGO_LEDGER_VENDOR_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_VENDOR_NUMBER_PREFIX', 'V')\nDJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX', 'C')\nDJANGO_LEDGER_EXPENSE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_EXPENSE_NUMBER_PREFIX', 'IEX')\nDJANGO_LEDGER_INVENTORY_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_INVENTORY_NUMBER_PREFIX', 'INV')\nDJANGO_LEDGER_PRODUCT_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_PRODUCT_NUMBER_PREFIX', 'IPR')\nDJANGO_LEDGER_DOCUMENT_NUMBER_PADDING = getattr(settings, 'DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING', 10)\nDJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX', '000')\nDJANGO_LEDGER_BILL_MODEL_ABSTRACT_CLASS = getattr(settings,\n 'DJANGO_LEDGER_BILL_MODEL_ABSTRACT_CLASS',\n 'django_ledger.models.bill.BillModelAbstract')\nDJANGO_LEDGER_INVOICE_MODEL_ABSTRACT_CLASS = getattr(settings,\n 'DJANGO_LEDGER_INVOICE_MODEL_ABSTRACT_CLASS',\n 
'django_ledger.models.invoice.InvoiceModelAbstract')\nDJANGO_LEDGER_DEFAULT_COA = getattr(settings, 'DJANGO_LEDGER_DEFAULT_COA', None)\nDJANGO_LEDGER_FINANCIAL_ANALYSIS = {\n 'ratios': {\n 'current_ratio': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': 2,\n 'watch': 1,\n 'warning': .5,\n 'critical': .25\n }\n },\n 'quick_ratio': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': 2,\n 'watch': 1,\n 'warning': .5,\n 'critical': .25\n }\n },\n 'debt_to_equity': {\n 'good_incremental': False,\n 'ranges': {\n 'healthy': 0,\n 'watch': .25,\n 'warning': .5,\n 'critical': 1\n }\n },\n 'return_on_equity': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .07,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'return_on_assets': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'net_profit_margin': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'gross_profit_margin': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n }\n}" }, { "identifier": "InvalidDateInputError", "path": "django_ledger/exceptions.py", "snippet": "class InvalidDateInputError(ValidationError):\n pass" }, { "identifier": "TransactionNotInBalanceError", "path": "django_ledger/exceptions.py", "snippet": "class TransactionNotInBalanceError(ValidationError):\n pass" }, { "identifier": "roles", "path": "django_ledger/io/roles.py", "snippet": "DEBIT = 'debit'\nCREDIT = 'credit'\nASSET_CA_CASH = 'asset_ca_cash'\nASSET_CA_MKT_SECURITIES = 'asset_ca_mkt_sec'\nASSET_CA_RECEIVABLES = 'asset_ca_recv'\nASSET_CA_INVENTORY = 'asset_ca_inv'\nASSET_CA_UNCOLLECTIBLES = 'asset_ca_uncoll'\nASSET_CA_PREPAID = 'asset_ca_prepaid'\nASSET_CA_OTHER = 'asset_ca_other'\nASSET_LTI_NOTES_RECEIVABLE = 'asset_lti_notes'\nASSET_LTI_LAND = 'asset_lti_land'\nASSET_LTI_SECURITIES = 'asset_lti_sec'\nASSET_PPE_BUILDINGS = 'asset_ppe_build'\nASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION = 'asset_ppe_build_accum_depr'\nASSET_PPE_EQUIPMENT = 'asset_ppe_equip'\nASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION = 'asset_ppe_equip_accum_depr'\nASSET_PPE_PLANT = 'asset_ppe_plant'\nASSET_PPE_PLANT_ACCUM_DEPRECIATION = 'asset_ppe_plant_depr'\nASSET_INTANGIBLE_ASSETS = 'asset_ia'\nASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION = 'asset_ia_accum_amort'\nASSET_ADJUSTMENTS = 'asset_adjustment'\nLIABILITY_CL_ACC_PAYABLE = 'lia_cl_acc_payable'\nLIABILITY_CL_WAGES_PAYABLE = 'lia_cl_wages_payable'\nLIABILITY_CL_TAXES_PAYABLE = 'lia_cl_taxes_payable'\nLIABILITY_CL_INTEREST_PAYABLE = 'lia_cl_int_payable'\nLIABILITY_CL_ST_NOTES_PAYABLE = 'lia_cl_st_notes_payable'\nLIABILITY_CL_LTD_MATURITIES = 'lia_cl_ltd_mat'\nLIABILITY_CL_DEFERRED_REVENUE = 'lia_cl_def_rev'\nLIABILITY_CL_OTHER = 'lia_cl_other'\nLIABILITY_LTL_NOTES_PAYABLE = 'lia_ltl_notes'\nLIABILITY_LTL_BONDS_PAYABLE = 'lia_ltl_bonds'\nLIABILITY_LTL_MORTGAGE_PAYABLE = 'lia_ltl_mortgage'\nEQUITY_CAPITAL = 'eq_capital'\nEQUITY_ADJUSTMENT = 'eq_adjustment'\nEQUITY_COMMON_STOCK = 'eq_stock_common'\nEQUITY_PREFERRED_STOCK = 'eq_stock_preferred'\nEQUITY_DIVIDENDS = 'eq_dividends'\nINCOME_OPERATIONAL = 'in_operational'\nINCOME_PASSIVE = 'in_passive'\nINCOME_CAPITAL_GAIN_LOSS = 'in_gain_loss'\nINCOME_INTEREST = 'in_interest'\nINCOME_OTHER = 'in_other'\nCOGS = 'cogs_regular'\nEXPENSE_OPERATIONAL = 'ex_regular'\nEXPENSE_CAPITAL = 'ex_capital'\nEXPENSE_DEPRECIATION = 'ex_depreciation'\nEXPENSE_AMORTIZATION = 
'ex_amortization'\nEXPENSE_TAXES = 'ex_taxes'\nEXPENSE_INTEREST_ST = 'ex_interest_st'\nEXPENSE_INTEREST_LT = 'ex_interest'\nEXPENSE_OTHER = 'ex_other'\nROOT_COA = 'root_coa'\nROOT_ASSETS = 'root_assets'\nROOT_LIABILITIES = 'root_liabilities'\nROOT_CAPITAL = 'root_capital'\nROOT_INCOME = 'root_income'\nROOT_COGS = 'root_cogs'\nROOT_EXPENSES = 'root_expenses'\nROOT_GROUP = [\n ROOT_COA,\n ROOT_ASSETS,\n ROOT_LIABILITIES,\n ROOT_CAPITAL,\n ROOT_INCOME,\n ROOT_COGS,\n ROOT_EXPENSES\n]\nROOT_GROUP_LEVEL_2 = [\n ROOT_ASSETS,\n ROOT_LIABILITIES,\n ROOT_CAPITAL,\n ROOT_INCOME,\n ROOT_COGS,\n ROOT_EXPENSES\n]\nROOT_GROUP_META = {\n ROOT_COA: {\n 'code': '00000',\n 'title': 'CoA Root Node',\n 'balance_type': DEBIT\n },\n ROOT_ASSETS: {\n 'code': '01000',\n 'title': 'Asset Accounts Root Node',\n 'balance_type': DEBIT\n },\n ROOT_LIABILITIES: {\n 'code': '02000',\n 'title': 'Liability Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_CAPITAL: {\n 'code': '03000',\n 'title': 'Capital Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_INCOME: {\n 'code': '04000',\n 'title': 'Income Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_COGS: {\n 'code': '05000',\n 'title': 'COGS Accounts Root Node',\n 'balance_type': DEBIT\n },\n ROOT_EXPENSES: {\n 'code': '06000',\n 'title': 'Expense Accounts Root Node',\n 'balance_type': DEBIT\n },\n}\nGROUP_QUICK_ASSETS = [\n ASSET_CA_CASH,\n ASSET_CA_MKT_SECURITIES\n]\nGROUP_CURRENT_ASSETS = [\n ASSET_CA_CASH,\n ASSET_CA_MKT_SECURITIES,\n ASSET_CA_INVENTORY,\n ASSET_CA_RECEIVABLES,\n ASSET_CA_PREPAID,\n ASSET_CA_UNCOLLECTIBLES,\n ASSET_CA_OTHER\n]\nGROUP_NON_CURRENT_ASSETS = [\n ASSET_LTI_NOTES_RECEIVABLE,\n ASSET_LTI_LAND,\n ASSET_LTI_SECURITIES,\n ASSET_PPE_BUILDINGS,\n ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION,\n ASSET_PPE_EQUIPMENT,\n ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION,\n ASSET_PPE_PLANT,\n ASSET_PPE_PLANT_ACCUM_DEPRECIATION,\n ASSET_INTANGIBLE_ASSETS,\n ASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION,\n ASSET_ADJUSTMENTS\n]\nGROUP_ASSETS = GROUP_CURRENT_ASSETS + GROUP_NON_CURRENT_ASSETS\nGROUP_CURRENT_LIABILITIES = [\n LIABILITY_CL_ACC_PAYABLE,\n LIABILITY_CL_DEFERRED_REVENUE,\n LIABILITY_CL_INTEREST_PAYABLE,\n LIABILITY_CL_LTD_MATURITIES,\n LIABILITY_CL_OTHER,\n LIABILITY_CL_ST_NOTES_PAYABLE,\n LIABILITY_CL_WAGES_PAYABLE,\n LIABILITY_CL_TAXES_PAYABLE\n]\nGROUP_LT_LIABILITIES = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n]\nGROUP_LIABILITIES = GROUP_CURRENT_LIABILITIES + GROUP_LT_LIABILITIES\nGROUP_CAPITAL = [\n EQUITY_CAPITAL,\n EQUITY_COMMON_STOCK,\n EQUITY_PREFERRED_STOCK,\n EQUITY_DIVIDENDS,\n EQUITY_ADJUSTMENT\n]\nGROUP_INCOME = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER\n]\nGROUP_COGS = [\n COGS\n]\nGROUP_EXPENSES = [\n EXPENSE_OPERATIONAL,\n EXPENSE_INTEREST_ST,\n EXPENSE_INTEREST_LT,\n EXPENSE_TAXES,\n EXPENSE_CAPITAL,\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION,\n EXPENSE_OTHER\n]\nGROUP_NET_PROFIT = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER,\n COGS\n]\nGROUP_GROSS_PROFIT = [\n INCOME_OPERATIONAL,\n COGS\n]\nGROUP_NET_SALES = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE\n]\nGROUP_PPE_ACCUM_DEPRECIATION = [\n ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION,\n ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION,\n ASSET_PPE_PLANT_ACCUM_DEPRECIATION\n]\nGROUP_EXPENSE_DEP_AND_AMT = [\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION\n]\nGROUP_EARNINGS = GROUP_INCOME + GROUP_COGS + 
GROUP_EXPENSES\nGROUP_EQUITY = GROUP_CAPITAL + GROUP_EARNINGS\nGROUP_LIABILITIES_EQUITY = GROUP_LIABILITIES + GROUP_EQUITY\nGROUP_INVOICE = [ASSET_CA_CASH, ASSET_CA_RECEIVABLES, LIABILITY_CL_DEFERRED_REVENUE]\nGROUP_BILL = [ASSET_CA_CASH, ASSET_CA_PREPAID, LIABILITY_CL_ACC_PAYABLE]\nGROUP_IC_OPERATING_REVENUES = [INCOME_OPERATIONAL]\nGROUP_IC_OPERATING_COGS = [COGS]\nGROUP_IC_OPERATING_EXPENSES = [EXPENSE_OPERATIONAL]\nGROUP_IC_OTHER_REVENUES = [\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER\n]\nGROUP_IC_OTHER_EXPENSES = [\n EXPENSE_INTEREST_ST,\n EXPENSE_INTEREST_LT,\n EXPENSE_TAXES,\n EXPENSE_CAPITAL,\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION,\n EXPENSE_OTHER\n]\nGROUP_CFS_NET_INCOME = GROUP_EARNINGS\nGROUP_CFS_OP_DEPRECIATION_AMORTIZATION = [\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION\n]\nGROUP_CFS_OP_INVESTMENT_GAINS = [\n INCOME_CAPITAL_GAIN_LOSS\n]\nGROUP_CFS_OP_ACCOUNTS_RECEIVABLE = [\n ASSET_CA_RECEIVABLES\n]\nGROUP_CFS_OP_INVENTORY = [\n ASSET_CA_INVENTORY\n]\nGROUP_CFS_OP_ACCOUNTS_PAYABLE = [\n LIABILITY_CL_ACC_PAYABLE\n]\nGROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT = [\n ASSET_CA_PREPAID,\n ASSET_CA_UNCOLLECTIBLES,\n ASSET_CA_OTHER\n]\nGROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT = [\n LIABILITY_CL_WAGES_PAYABLE,\n LIABILITY_CL_INTEREST_PAYABLE,\n LIABILITY_CL_TAXES_PAYABLE,\n LIABILITY_CL_LTD_MATURITIES,\n LIABILITY_CL_DEFERRED_REVENUE,\n LIABILITY_CL_OTHER,\n]\nGROUP_CFS_OPERATING = list(chain.from_iterable([\n GROUP_CFS_NET_INCOME,\n GROUP_CFS_OP_DEPRECIATION_AMORTIZATION,\n GROUP_CFS_OP_INVESTMENT_GAINS,\n GROUP_CFS_OP_ACCOUNTS_RECEIVABLE,\n GROUP_CFS_OP_INVENTORY,\n GROUP_CFS_OP_ACCOUNTS_PAYABLE,\n GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT,\n GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT\n]))\nGROUP_CFS_FIN_ISSUING_EQUITY = [EQUITY_CAPITAL, EQUITY_COMMON_STOCK, EQUITY_PREFERRED_STOCK]\nGROUP_CFS_FIN_DIVIDENDS = [EQUITY_DIVIDENDS]\nGROUP_CFS_FIN_ST_DEBT_PAYMENTS = [\n LIABILITY_CL_ST_NOTES_PAYABLE,\n LIABILITY_CL_ACC_PAYABLE,\n EXPENSE_INTEREST_ST\n]\nGROUP_CFS_FIN_LT_DEBT_PAYMENTS = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n EXPENSE_INTEREST_LT\n]\nGROUP_CFS_FINANCING = GROUP_CFS_FIN_ISSUING_EQUITY + GROUP_CFS_FIN_DIVIDENDS\nGROUP_CFS_INV_PURCHASE_OR_SALE_OF_PPE = [\n ASSET_PPE_BUILDINGS,\n ASSET_PPE_PLANT,\n ASSET_PPE_EQUIPMENT,\n INCOME_CAPITAL_GAIN_LOSS\n]\nGROUP_CFS_INV_LTD_OF_PPE = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n]\nGROUP_CFS_INVESTING_PPE = GROUP_CFS_INV_PURCHASE_OR_SALE_OF_PPE + GROUP_CFS_INV_LTD_OF_PPE\nGROUP_CFS_INV_PURCHASE_OF_SECURITIES = [\n ASSET_CA_MKT_SECURITIES,\n ASSET_LTI_NOTES_RECEIVABLE,\n ASSET_LTI_SECURITIES,\n INCOME_INTEREST,\n INCOME_PASSIVE,\n]\nGROUP_CFS_INV_LTD_OF_SECURITIES = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE\n]\nGROUP_CFS_INVESTING_SECURITIES = GROUP_CFS_INV_PURCHASE_OF_SECURITIES + GROUP_CFS_INV_LTD_OF_SECURITIES\nGROUP_CFS_INVESTING = GROUP_CFS_INVESTING_PPE + GROUP_CFS_INVESTING_SECURITIES\nGROUP_CFS_INVESTING_AND_FINANCING = GROUP_CFS_INVESTING + GROUP_CFS_FINANCING\nBS_ASSET_ROLE = 'assets'\nBS_LIABILITIES_ROLE = 'liabilities'\nBS_EQUITY_ROLE = 'equity'\nACCOUNT_ROLE_CHOICES = [\n (BS_ASSET_ROLE.capitalize(), (\n # CURRENT ASSETS ----\n (ASSET_CA_CASH, _('Current Asset')),\n (ASSET_CA_MKT_SECURITIES, _('Marketable Securities')),\n (ASSET_CA_RECEIVABLES, _('Receivables')),\n (ASSET_CA_INVENTORY, _('Inventory')),\n 
(ASSET_CA_UNCOLLECTIBLES, _('Uncollectibles')),\n (ASSET_CA_PREPAID, _('Prepaid')),\n (ASSET_CA_OTHER, _('Other Liquid Assets')),\n\n # LONG TERM INVESTMENTS ---\n (ASSET_LTI_NOTES_RECEIVABLE, _('Notes Receivable')),\n (ASSET_LTI_LAND, _('Land')),\n (ASSET_LTI_SECURITIES, _('Securities')),\n\n # PPE ...\n (ASSET_PPE_BUILDINGS, _('Buildings')),\n (ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION, _('Buildings - Accum. Depreciation')),\n (ASSET_PPE_PLANT, _('Plant')),\n (ASSET_PPE_PLANT_ACCUM_DEPRECIATION, _('Plant - Accum. Depreciation')),\n (ASSET_PPE_EQUIPMENT, _('Equipment')),\n (ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION, _('Equipment - Accum. Depreciation')),\n\n # Other Assets ...\n (ASSET_INTANGIBLE_ASSETS, _('Intangible Assets')),\n (ASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION, _('Intangible Assets - Accum. Amortization')),\n (ASSET_ADJUSTMENTS, _('Other Assets')),\n )),\n (BS_LIABILITIES_ROLE.capitalize(), (\n\n # CURRENT LIABILITIES ---\n (LIABILITY_CL_ACC_PAYABLE, _('Accounts Payable')),\n (LIABILITY_CL_WAGES_PAYABLE, _('Wages Payable')),\n (LIABILITY_CL_INTEREST_PAYABLE, _('Interest Payable')),\n (LIABILITY_CL_TAXES_PAYABLE, _('Taxes Payable')),\n (LIABILITY_CL_ST_NOTES_PAYABLE, _('Short Term Notes Payable')),\n (LIABILITY_CL_LTD_MATURITIES, _('Current Maturities of Long Tern Debt')),\n (LIABILITY_CL_DEFERRED_REVENUE, _('Deferred Revenue')),\n (LIABILITY_CL_OTHER, _('Other Liabilities')),\n\n # LONG TERM LIABILITIES ----\n (LIABILITY_LTL_NOTES_PAYABLE, _('Long Term Notes Payable')),\n (LIABILITY_LTL_BONDS_PAYABLE, _('Bonds Payable')),\n (LIABILITY_LTL_MORTGAGE_PAYABLE, _('Mortgage Payable')),\n )),\n (BS_EQUITY_ROLE.capitalize(), (\n\n # EQUITY ---\n (EQUITY_CAPITAL, _('Capital')),\n (EQUITY_COMMON_STOCK, _('Common Stock')),\n (EQUITY_PREFERRED_STOCK, _('Preferred Stock')),\n (EQUITY_ADJUSTMENT, _('Other Equity Adjustments')),\n (EQUITY_DIVIDENDS, _('Dividends & Distributions to Shareholders')),\n\n # INCOME ---\n (INCOME_OPERATIONAL, _('Operational Income')),\n (INCOME_PASSIVE, _('Investing/Passive Income')),\n (INCOME_INTEREST, _('Interest Income')),\n (INCOME_CAPITAL_GAIN_LOSS, _('Capital Gain/Loss Income')),\n (INCOME_OTHER, _('Other Income')),\n\n # COGS ----\n (COGS, _('Cost of Goods Sold')),\n\n # EXPENSES ----\n (EXPENSE_OPERATIONAL, _('Regular Expense')),\n (EXPENSE_INTEREST_ST, _('Interest Expense - Short Term Debt')),\n (EXPENSE_INTEREST_LT, _('Interest Expense - Long Term Debt')),\n (EXPENSE_TAXES, _('Tax Expense')),\n (EXPENSE_CAPITAL, _('Capital Expense')),\n (EXPENSE_DEPRECIATION, _('Depreciation Expense')),\n (EXPENSE_AMORTIZATION, _('Amortization Expense')),\n (EXPENSE_OTHER, _('Other Expense')),\n )),\n ('Root', (\n (ROOT_COA, 'CoA Root Account'),\n (ROOT_ASSETS, 'Assets Root Account'),\n (ROOT_LIABILITIES, 'Liabilities Root Account'),\n (ROOT_CAPITAL, 'Capital Root Account'),\n (ROOT_INCOME, 'Income Root Account'),\n (ROOT_COGS, 'COGS Root Account'),\n (ROOT_EXPENSES, 'Expenses Root Account'),\n ))\n]\nACCOUNT_CHOICES_NO_ROOT = [c for c in ACCOUNT_ROLE_CHOICES if c[0] != 'Root']\nROLES_ORDER_ASSETS = [a[0] for a in ACCOUNT_ROLE_CHOICES[0][1]]\nROLES_ORDER_LIABILITIES = [a[0] for a in ACCOUNT_ROLE_CHOICES[1][1]]\nROLES_ORDER_CAPITAL = [a[0] for a in ACCOUNT_ROLE_CHOICES[2][1]]\nROLES_ORDER_ALL = list(chain.from_iterable([ROLES_ORDER_ASSETS, ROLES_ORDER_LIABILITIES, ROLES_ORDER_CAPITAL]))\nACCOUNT_LIST_ROLE_ORDER = list(r[0] for r in chain.from_iterable([i[1] for i in ACCOUNT_CHOICES_NO_ROOT]))\nACCOUNT_LIST_ROLE_VERBOSE = {r[0]: r[1] for r in chain.from_iterable([i[1] for i 
in ACCOUNT_CHOICES_NO_ROOT])}\nROLE_TUPLES = sum([[(r[0].lower(), s[0]) for s in r[1]] for r in ACCOUNT_ROLE_CHOICES], list())\nROLE_DICT = dict([(t[0].lower(), [r[0] for r in t[1]]) for t in ACCOUNT_ROLE_CHOICES])\nVALID_ROLES = [r[1] for r in ROLE_TUPLES]\nBS_ROLES = dict((r[1], r[0]) for r in ROLE_TUPLES)\nBS_BUCKETS = {\n '0': 'Root',\n '1': 'Asset',\n '2': 'Liability',\n '3': 'Capital',\n '4': 'Income',\n '5': 'COGS',\n '6': 'Expenses'\n}\nBS_BUCKETS_ORDER = [v for _, v in BS_BUCKETS.items() if v != 'Root']\nROLES_VARS = locals().keys()\nROLES_DIRECTORY = dict()\nROLES_CATEGORIES = ['ASSET', 'LIABILITY', 'EQUITY', 'INCOME', 'COGS', 'EXPENSE']\nROLES_GROUPS = [g for g in ROLES_VARS if g.split('_')[0] == 'GROUP']\nGROUPS_DIRECTORY = dict()\ndef validate_roles(roles: Union[str, List[str]], raise_exception: bool = True) -> Set[str]:" }, { "identifier": "RoleContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class RoleContextManager:\n\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.DIGEST = io_data\n self.DIGEST['role_account'] = None\n self.DIGEST['role_balance'] = None\n\n self.ACCOUNTS = io_data['accounts']\n\n self.ROLES_ACCOUNTS = dict()\n self.ROLES_BALANCES = dict()\n self.ROLES_BALANCE_SHEET = dict()\n\n if self.BY_PERIOD:\n self.ROLES_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.DIGEST['role_balance_by_period'] = None\n if self.BY_UNIT:\n self.ROLES_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.DIGEST['role_balance_by_unit'] = None\n\n if self.BY_PERIOD and self.BY_UNIT:\n self.ROLES_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n\n def digest(self):\n\n self.process_roles()\n self.DIGEST['role_account'] = self.ROLES_ACCOUNTS\n self.DIGEST['role_balance'] = self.ROLES_BALANCES\n\n if self.BY_PERIOD:\n self.DIGEST['role_balance_by_period'] = self.ROLES_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.DIGEST['role_balance_by_unit'] = self.ROLES_BALANCES_BY_UNIT\n\n return self.DIGEST\n\n def process_roles(self):\n\n for c, l in roles_module.ROLES_DIRECTORY.items():\n for r in l:\n acc_list = list(acc for acc in self.ACCOUNTS if acc['role'] == getattr(roles_module, r))\n\n self.ROLES_ACCOUNTS[r] = acc_list\n self.ROLES_BALANCES[r] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.ROLES_BALANCES_BY_PERIOD[key][r] = sum(acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.ROLES_BALANCES_BY_UNIT[key][r] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0])" }, { "identifier": "GroupContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class GroupContextManager:\n GROUP_ACCOUNTS_KEY = 'group_account'\n GROUP_BALANCE_KEY = 'group_balance'\n GROUP_BALANCE_BY_UNIT_KEY = 'group_balance_by_unit'\n GROUP_BALANCE_BY_PERIOD_KEY = 'group_balance_by_period'\n\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.IO_DIGEST = io_data\n\n self.IO_DIGEST[self.GROUP_ACCOUNTS_KEY] = None\n self.IO_DIGEST[self.GROUP_BALANCE_KEY] = None\n\n self.DIGEST_ACCOUNTS = io_data['accounts']\n\n self.GROUPS_ACCOUNTS = dict()\n self.GROUPS_BALANCES = dict()\n\n if 
self.BY_PERIOD:\n self.GROUPS_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = None\n\n if self.BY_UNIT:\n self.GROUPS_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_UNIT_KEY] = None\n\n if self.BY_PERIOD and self.BY_UNIT:\n self.GROUPS_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = None\n\n def digest(self):\n\n self.process_groups()\n self.IO_DIGEST[self.GROUP_ACCOUNTS_KEY] = self.GROUPS_ACCOUNTS\n self.IO_DIGEST[self.GROUP_BALANCE_KEY] = self.GROUPS_BALANCES\n\n if self.BY_PERIOD:\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = self.GROUPS_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.IO_DIGEST[self.GROUP_BALANCE_BY_UNIT_KEY] = self.GROUPS_BALANCES_BY_UNIT\n return self.IO_DIGEST\n\n def get_accounts_generator(self, mod, g):\n return (acc for acc in self.DIGEST_ACCOUNTS if acc['role'] in getattr(mod, g))\n\n def process_groups(self):\n for g in roles_module.ROLES_GROUPS:\n acc_list = list(self.get_accounts_generator(roles_module, g))\n self.GROUPS_ACCOUNTS[g] = acc_list\n self.GROUPS_BALANCES[g] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.GROUPS_BALANCES_BY_PERIOD[key][g] = sum(\n acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.GROUPS_BALANCES_BY_UNIT[key][g] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0]\n )" }, { "identifier": "ActivityContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class ActivityContextManager:\n\n def __init__(self,\n io_data: dict,\n by_unit: bool = False,\n by_period: bool = False):\n\n self.DIGEST = io_data\n self.DIGEST['activity_account'] = None\n self.DIGEST['activity_balance'] = None\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.ACCOUNTS = io_data['accounts']\n self.ACTIVITY_ACCOUNTS = dict()\n self.ACTIVITY_BALANCES = dict()\n\n if self.BY_PERIOD:\n self.ACTIVITY_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.DIGEST['activity_balance_by_period'] = None\n if self.BY_UNIT:\n self.ACTIVITY_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.DIGEST['activity_balance_by_unit'] = None\n if self.BY_PERIOD and self.BY_UNIT:\n self.ROLES_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n\n def digest(self):\n\n self.process_activity()\n self.DIGEST['activity_account'] = self.ACTIVITY_ACCOUNTS\n self.DIGEST['activity_balance'] = self.ACTIVITY_BALANCES\n\n if self.BY_PERIOD:\n self.DIGEST['activity_balance_by_period'] = self.ACTIVITY_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.DIGEST['activity_balance_by_unit'] = self.ACTIVITY_BALANCES_BY_PERIOD\n\n def get_accounts_generator(self, activity: str):\n return (acc for acc in self.ACCOUNTS if acc['activity'] == activity)\n\n def process_activity(self):\n JournalEntryModel = lazy_importer.get_journal_entry_model()\n for act in JournalEntryModel.VALID_ACTIVITIES:\n acc_list = list(self.get_accounts_generator(act))\n self.ACTIVITY_ACCOUNTS[act] = acc_list\n self.ACTIVITY_BALANCES[act] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.ACTIVITY_BALANCES_BY_PERIOD[key][act] = 
sum(acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.ACTIVITY_BALANCES_BY_UNIT[key][act] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0])" }, { "identifier": "BalanceSheetStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class BalanceSheetStatementContextManager:\n def __init__(self, io_data: dict):\n self.DIGEST = io_data\n\n def digest(self):\n if 'group_account' in self.DIGEST:\n gb_bs = {\n bsr: list(l) for bsr, l in groupby(\n chain.from_iterable(\n [\n self.DIGEST['group_account']['GROUP_ASSETS'],\n self.DIGEST['group_account']['GROUP_LIABILITIES'],\n self.DIGEST['group_account']['GROUP_CAPITAL'],\n ]\n ),\n key=lambda acc: acc['role_bs'])\n }\n\n bs_context = {\n bs_role: {\n 'total_balance': sum(a['balance'] for a in gb),\n 'is_block': True,\n 'roles': {\n r: {\n 'accounts': list(a)\n } for r, a in groupby(list(gb), key=lambda acc: acc['role'])\n }\n } for bs_role, gb in gb_bs.items()\n }\n\n for bs_role, bs_role_data in bs_context.items():\n for acc_role, role_data in bs_role_data['roles'].items():\n role_data['total_balance'] = sum(a['balance'] for a in role_data['accounts'])\n role_data['role_name'] = roles_module.ACCOUNT_LIST_ROLE_VERBOSE[acc_role]\n\n bs_context['equity_balance'] = self.DIGEST['group_balance']['GROUP_EQUITY']\n bs_context['retained_earnings_balance'] = self.DIGEST['group_balance']['GROUP_EARNINGS']\n bs_context['liabilities_equity_balance'] = self.DIGEST['group_balance']['GROUP_LIABILITIES_EQUITY']\n\n self.DIGEST['balance_sheet'] = bs_context\n\n return self.DIGEST" }, { "identifier": "IncomeStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class IncomeStatementContextManager:\n\n def __init__(self, io_data: dict):\n self.DIGEST = io_data\n\n def digest(self):\n if 'group_account' in self.DIGEST:\n self.DIGEST['income_statement'] = {\n 'operating': {\n 'revenues': [\n acc for acc in self.DIGEST['group_account']['GROUP_INCOME'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_REVENUES\n ],\n 'cogs': [\n acc for acc in self.DIGEST['group_account']['GROUP_COGS'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_COGS\n ],\n 'expenses': [\n acc for acc in self.DIGEST['group_account']['GROUP_EXPENSES'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_EXPENSES\n ]\n },\n 'other': {\n 'revenues': [acc for acc in self.DIGEST['group_account']['GROUP_INCOME'] if\n acc['role'] in roles_module.GROUP_IC_OTHER_REVENUES],\n 'expenses': [acc for acc in self.DIGEST['group_account']['GROUP_EXPENSES'] if\n acc['role'] in roles_module.GROUP_IC_OTHER_EXPENSES],\n }\n }\n\n for activity, ic_section in self.DIGEST['income_statement'].items():\n for section, acc_list in ic_section.items():\n for acc in acc_list:\n acc['role_name'] = roles_module.ACCOUNT_LIST_ROLE_VERBOSE[acc['role']]\n\n # OPERATING INCOME...\n self.DIGEST['income_statement']['operating']['gross_profit'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['operating']['revenues'],\n self.DIGEST['income_statement']['operating']['cogs']\n ]\n ))\n self.DIGEST['income_statement']['operating']['net_operating_income'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['operating']['revenues'],\n self.DIGEST['income_statement']['operating']['cogs'],\n self.DIGEST['income_statement']['operating']['expenses'],\n ]\n 
))\n self.DIGEST['income_statement']['operating']['net_operating_revenue'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['revenues']\n )\n self.DIGEST['income_statement']['operating']['net_cogs'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['cogs']\n )\n self.DIGEST['income_statement']['operating']['net_operating_expenses'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['expenses']\n )\n\n # OTHER INCOME....\n self.DIGEST['income_statement']['other']['net_other_revenues'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['other']['revenues']\n )\n self.DIGEST['income_statement']['other']['net_other_expenses'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['other']['expenses']\n )\n self.DIGEST['income_statement']['other']['net_other_income'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['other']['revenues'],\n self.DIGEST['income_statement']['other']['expenses']\n ]\n ))\n\n # NET INCOME...\n self.DIGEST['income_statement']['net_income'] = self.DIGEST['income_statement']['operating'][\n 'net_operating_income']\n self.DIGEST['income_statement']['net_income'] += self.DIGEST['income_statement']['other'][\n 'net_other_income']\n return self.DIGEST" }, { "identifier": "CashFlowStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class CashFlowStatementContextManager:\n CFS_DIGEST_KEY = 'cash_flow_statement'\n\n # todo: implement by period and by unit...\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n self.IO_DIGEST = io_data\n self.CASH_ACCOUNTS = [a for a in self.IO_DIGEST['accounts'] if a['role'] == roles_module.ASSET_CA_CASH]\n self.JE_MODEL = lazy_loader.get_journal_entry_model()\n\n def check_io_digest(self):\n if GroupContextManager.GROUP_BALANCE_KEY not in self.IO_DIGEST:\n raise ValidationError(\n 'IO Digest must have groups for Cash Flow Statement'\n )\n\n def operating(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n operating_activities = dict()\n operating_activities['GROUP_CFS_NET_INCOME'] = {\n 'description': 'Net Income',\n 'balance': group_balances['GROUP_CFS_NET_INCOME']\n }\n operating_activities['GROUP_CFS_OP_DEPRECIATION_AMORTIZATION'] = {\n 'description': 'Depreciation & Amortization of Assets',\n 'balance': -group_balances['GROUP_CFS_OP_DEPRECIATION_AMORTIZATION']\n }\n operating_activities['GROUP_CFS_OP_INVESTMENT_GAINS'] = {\n 'description': 'Gain/Loss Sale of Assets',\n 'balance': group_balances['GROUP_CFS_OP_INVESTMENT_GAINS']\n }\n operating_activities['GROUP_CFS_OP_ACCOUNTS_RECEIVABLE'] = {\n 'description': 'Accounts Receivable',\n 'balance': -group_balances['GROUP_CFS_OP_ACCOUNTS_RECEIVABLE']\n }\n operating_activities['GROUP_CFS_OP_INVENTORY'] = {\n 'description': 'Inventories',\n 'balance': -group_balances['GROUP_CFS_OP_INVENTORY']\n }\n\n operating_activities['GROUP_CFS_OP_ACCOUNTS_PAYABLE'] = {\n 'description': 'Accounts Payable',\n 'balance': group_balances['GROUP_CFS_OP_ACCOUNTS_PAYABLE']\n }\n operating_activities['GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT'] = {\n 'description': 'Other Current Assets',\n 'balance': -group_balances['GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT']\n }\n operating_activities['GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT'] = {\n 'description': 'Other Current Liabilities',\n 'balance': 
group_balances['GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT']\n }\n\n net_cash_by_op_activities = sum(i['balance'] for g, i in operating_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['operating'] = operating_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity'] = dict(\n OPERATING=net_cash_by_op_activities\n )\n\n def financing(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n financing_activities = dict()\n financing_activities['GROUP_CFS_FIN_ISSUING_EQUITY'] = {\n 'description': 'Common Stock, Preferred Stock and Capital Raised',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_EQUITY)\n }\n financing_activities['GROUP_CFS_FIN_DIVIDENDS'] = {\n 'description': 'Dividends Payed Out to Shareholders',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_DIVIDENDS)\n }\n financing_activities['GROUP_CFS_FIN_ST_DEBT_PAYMENTS'] = {\n 'description': 'Increase/Reduction of Short-Term Debt Principal',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_STD)\n }\n financing_activities['GROUP_CFS_FIN_LT_DEBT_PAYMENTS'] = {\n 'description': 'Increase/Reduction of Long-Term Debt Principal',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_LTD)\n }\n\n net_cash = sum(i['balance'] for g, i in financing_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['financing'] = financing_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity']['FINANCING'] = net_cash\n\n def investing(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n investing_activities = dict()\n investing_activities['GROUP_CFS_INVESTING_SECURITIES'] = {\n 'description': 'Purchase, Maturity and Sales of Investments & Securities',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.INVESTING_SECURITIES)\n }\n investing_activities['GROUP_CFS_INVESTING_PPE'] = {\n 'description': 'Addition and Disposition of Property, Plant & Equipment',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.INVESTING_PPE)\n }\n\n net_cash = sum(i['balance'] for g, i in investing_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['investing'] = investing_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity']['INVESTING'] = net_cash\n\n def net_cash(self):\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash'] = sum([\n bal for act, bal in self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity'].items()\n ])\n\n def digest(self):\n self.check_io_digest()\n self.operating()\n self.financing()\n self.investing()\n self.net_cash()\n return self.IO_DIGEST" }, { "identifier": "IODigestContextManager", "path": "django_ledger/io/io_digest.py", "snippet": "class IODigestContextManager:\n\n def __init__(self, io_data: defaultdict):\n self.IO_DATA: defaultdict = io_data\n self.IO_MODEL = self.IO_DATA['io_model']\n self.TXS_QS = self.IO_DATA['txs_qs']\n self.STRFTIME_FORMAT = '%B %d, %Y'\n\n def get_io_data(self) -> defaultdict:\n return self.IO_DATA\n\n def get_strftime_format(self):\n return self.STRFTIME_FORMAT\n\n def get_from_date(self, as_str: bool = False, fmt=None) -> Optional[date]:\n from_date = self.IO_DATA['from_date']\n if from_date:\n if as_str:\n if not fmt:\n fmt = self.get_strftime_format()\n return from_date.strftime(fmt)\n return 
from_date\n\n def get_to_date(self, as_str: bool = False, fmt=None) -> date:\n if as_str:\n if not fmt:\n fmt = self.get_strftime_format()\n return self.IO_DATA['to_date'].strftime(fmt)\n return self.IO_DATA['to_date']\n\n def is_entity_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_entity_model()\n )\n\n def is_ledger_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_ledger_model()\n )\n\n def is_unit_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_unit_model()\n )\n\n def is_by_unit(self) -> bool:\n return self.IO_DATA['by_unit']\n\n def is_by_period(self) -> bool:\n return self.IO_DATA['by_period']\n\n def is_by_activity(self) -> bool:\n return self.IO_DATA['by_activity']\n\n # Balance Sheet Data...\n def has_balance_sheet(self) -> bool:\n return 'balance_sheet' in self.IO_DATA\n\n def get_balance_sheet_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['balance_sheet']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have balance sheet information available.'\n )\n\n # Income Statement Data...\n def has_income_statement(self) -> bool:\n return 'income_statement' in self.IO_DATA\n\n def get_income_statement_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['income_statement']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have income statement information available.'\n )\n\n # Cash Flow Statement Data...\n def has_cash_flow_statement(self):\n return 'cash_flow_statement' in self.IO_DATA\n\n def get_cash_flow_statement_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['cash_flow_statement']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have cash flow statement information available.'\n )\n\n # CLOSING ENTRIES...\n\n def get_closing_entry_data(self):\n io_data = self.get_io_data()\n return io_data['accounts']" }, { "identifier": "FinancialRatioManager", "path": "django_ledger/io/ratios.py", "snippet": "class FinancialRatioManager:\n\n def __init__(self, io_data):\n self.DIGEST = io_data\n self.ACCOUNTS = io_data['accounts']\n self.RATIO_NA = RATIO_NA\n\n self.quick_assets = io_data['group_balance']['GROUP_QUICK_ASSETS']\n self.assets = io_data['group_balance']['GROUP_ASSETS']\n self.current_liabilities = io_data['group_balance']['GROUP_CURRENT_LIABILITIES']\n self.current_assets = io_data['group_balance']['GROUP_CURRENT_ASSETS']\n self.equity = io_data['group_balance']['GROUP_CAPITAL']\n self.liabilities = io_data['group_balance']['GROUP_LIABILITIES']\n self.net_income = io_data['group_balance']['GROUP_EARNINGS']\n self.net_sales = io_data['group_balance']['GROUP_NET_SALES']\n self.net_profit = io_data['group_balance']['GROUP_NET_PROFIT']\n self.gross_profit = io_data['group_balance']['GROUP_GROSS_PROFIT']\n self.RATIOS = dict()\n\n def digest(self):\n self.quick_ratio()\n self.current_ratio()\n self.debt_to_equity()\n self.return_on_equity()\n self.return_on_assets()\n self.net_profit_margin()\n self.gross_profit_margin()\n self.DIGEST['ratios'] = self.RATIOS\n return self.DIGEST\n\n # ------> SOLVENCY RATIOS <------\n def quick_ratio(self, as_percent=False):\n if self.current_liabilities == 0:\n cr = self.RATIO_NA\n else:\n cr = self.quick_assets / self.current_liabilities\n if as_percent:\n cr = cr * 100\n self.RATIOS['quick_ratio'] = cr\n\n def current_ratio(self, 
as_percent=False):\n if self.current_liabilities == 0:\n cr = RATIO_NA\n else:\n cr = self.current_assets / self.current_liabilities\n if as_percent:\n cr = cr * 100\n self.RATIOS['current_ratio'] = cr\n\n # ------> LEVERAGE RATIOS <------\n def debt_to_equity(self, as_percent=False):\n if self.equity == 0:\n cr = RATIO_NA\n else:\n cr = self.liabilities / self.equity\n if as_percent:\n cr = cr * 100\n self.RATIOS['debt_to_equity'] = cr\n\n # ------> PROFITABILITY RATIOS <------\n def return_on_equity(self, as_percent=False):\n if self.equity == 0:\n cr = RATIO_NA\n else:\n cr = self.net_income / self.equity\n if as_percent:\n cr = cr * 100\n self.RATIOS['return_on_equity'] = cr\n\n def return_on_assets(self, as_percent=False):\n if self.assets == 0:\n cr = RATIO_NA\n else:\n cr = self.net_income / self.assets\n if as_percent:\n cr = cr * 100\n self.RATIOS['return_on_assets'] = cr\n\n def net_profit_margin(self, as_percent=False):\n if self.net_sales == 0:\n npm = RATIO_NA\n else:\n npm = self.net_profit / self.net_sales\n if as_percent:\n npm = npm * 100\n self.RATIOS['net_profit_margin'] = npm\n\n def gross_profit_margin(self, as_percent=False):\n if self.gross_profit == 0:\n gpm = RATIO_NA\n else:\n gpm = self.gross_profit / self.net_sales\n if as_percent:\n gpm = gpm * 100\n self.RATIOS['gross_profit_margin'] = gpm" }, { "identifier": "lazy_loader", "path": "django_ledger/models/utils.py", "snippet": "class LazyLoader:\n ENTITY_MODEL = None\n ENTITY_STATE_MODEL = None\n UNIT_MODEL = None\n ACCOUNT_MODEL = None\n BANK_ACCOUNT_MODEL = None\n LEDGER_MODEL = None\n TXS_MODEL = None\n JE_MODEL = None\n ITEM_MODEL = None\n ITEM_TRANSACTION_MODEL = None\n CUSTOMER_MODEL = None\n INVOICE_MODEL = None\n BILL_MODEL = None\n UOM_MODEL = None\n VENDOR_MODEL = None\n TRANSACTION_MODEL = None\n ENTITY_UNIT_MODEL = None\n PURCHASE_ORDER_MODEL = None\n ESTIMATE_MODEL = None\n CLOSING_ENTRY_MODEL = None\n CLOSING_ENTRY_TRANSACTION_MODEL = None\n ENTITY_DATA_GENERATOR = None\n BALANCE_SHEET_REPORT_CLASS = None\n INCOME_STATEMENT_REPORT_CLASS = None\n CASH_FLOW_STATEMENT_REPORT_CLASS = None\n def get_entity_model(self):\n def get_entity_state_model(self):\n def get_bank_account_model(self):\n def get_account_model(self):\n def get_txs_model(self):\n def get_purchase_order_model(self):\n def get_ledger_model(self):\n def get_unit_model(self):\n def get_journal_entry_model(self):\n def get_item_model(self):\n def get_item_transaction_model(self):\n def get_customer_model(self):\n def get_bill_model(self):\n def get_invoice_model(self):\n def get_uom_model(self):\n def get_vendor_model(self):\n def get_transaction_model(self):\n def get_entity_unit_model(self):\n def get_estimate_model(self):\n def get_entity_data_generator(self):\n def get_closing_entry_model(self):\n def get_closing_entry_transaction_model(self):\n def get_balance_sheet_report_class(self):\n def get_income_statement_report_class(self):\n def get_cash_flow_statement_report_class(self):" } ]
from collections import defaultdict, namedtuple
from datetime import datetime, date
from itertools import groupby
from pathlib import Path
from random import choice
from typing import List, Set, Union, Tuple, Optional, Dict

from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.models import Sum, QuerySet
from django.db.models.functions import TruncMonth
from django.http import Http404
from django.utils.dateparse import parse_date, parse_datetime
from django.utils.timezone import make_aware, is_naive, localtime
from django.utils.translation import gettext_lazy as _

from django_ledger import settings
from django_ledger.exceptions import InvalidDateInputError, TransactionNotInBalanceError
from django_ledger.io import roles as roles_module
from django_ledger.io.io_context import (RoleContextManager,
                                         GroupContextManager,
                                         ActivityContextManager,
                                         BalanceSheetStatementContextManager,
                                         IncomeStatementContextManager,
                                         CashFlowStatementContextManager)
from django_ledger.io.io_digest import IODigestContextManager
from django_ledger.io.ratios import FinancialRatioManager
from django_ledger.models.utils import lazy_loader
15717
'role_bs': roles_module.BS_ROLES.get(gl[0]['account__role']), 'role': gl[0]['account__role'], 'code': gl[0]['account__code'], 'name': gl[0]['account__name'], 'balance_type': gl[0]['account__balance_type'], 'tx_type': k[5], 'balance': sum(a['balance'] for a in gl), } def digest(self, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, txs_queryset: QuerySet = None, as_io_digest: bool = False, accounts: Optional[Union[Set[str], List[str]]] = None, role: Optional[Union[Set[str], List[str]]] = None, activity: str = None, signs: bool = True, to_date: Union[str, datetime, date] = None, from_date: Union[str, datetime, date] = None, process_roles: bool = False, process_groups: bool = False, process_ratios: bool = False, process_activity: bool = False, equity_only: bool = False, by_period: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, digest_name: str = None, balance_sheet_statement: bool = False, income_statement: bool = False, cash_flow_statement: bool = False, **kwargs) -> Union[Tuple, IODigestContextManager]: if balance_sheet_statement: from_date = None if cash_flow_statement: by_activity = True if activity: activity = validate_activity(activity) if role: role = roles_module.validate_roles(role) from_date, to_date = validate_dates(from_date, to_date) io_data = defaultdict(lambda: dict()) io_data['io_model'] = self io_data['from_date'] = from_date io_data['to_date'] = to_date io_data['by_unit'] = by_unit io_data['by_period'] = by_period io_data['by_activity'] = by_activity io_data['by_tx_type'] = by_tx_type txs_qs, accounts_digest = self.python_digest( txs_queryset=txs_queryset, user_model=user_model, accounts=accounts, role=role, activity=activity, entity_slug=entity_slug, unit_slug=unit_slug, to_date=to_date, from_date=from_date, signs=signs, equity_only=equity_only, by_period=by_period, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, **kwargs ) io_data['txs_qs'] = txs_qs io_data['accounts'] = accounts_digest if process_roles: roles_mgr = RoleContextManager( io_data=io_data, by_period=by_period, by_unit=by_unit ) # idea: change digest() name to something else? maybe aggregate, calculate?... io_data = roles_mgr.digest() if any([ process_groups, balance_sheet_statement, income_statement, cash_flow_statement ]): group_mgr = GroupContextManager( io_data=io_data, by_period=by_period, by_unit=by_unit ) io_data = group_mgr.digest() # todo: migrate this to group manager... io_data['group_account']['GROUP_ASSETS'].sort( key=lambda acc: roles_module.ROLES_ORDER_ASSETS.index(acc['role'])) io_data['group_account']['GROUP_LIABILITIES'].sort( key=lambda acc: roles_module.ROLES_ORDER_LIABILITIES.index(acc['role'])) io_data['group_account']['GROUP_CAPITAL'].sort( key=lambda acc: roles_module.ROLES_ORDER_CAPITAL.index(acc['role'])) if process_ratios: ratio_gen = FinancialRatioManager(io_data=io_data) io_data = ratio_gen.digest() if process_activity: activity_manager = ActivityContextManager(io_data=io_data, by_unit=by_unit, by_period=by_period) activity_manager.digest() if balance_sheet_statement:
""" Django Ledger created by Miguel Sanda <[email protected]>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <[email protected]> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception: raise TransactionNotInBalanceError( f'Invalid tx data. Credits and debits must match. Currently cr: {CREDITS}, db {DEBITS}.' f'Max Tolerance {settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE}' ) return IS_TX_MODEL, is_valid, diff def check_tx_balance(tx_data: list, perform_correction: bool = False) -> bool: if tx_data: IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data, raise_exception=perform_correction) if not perform_correction and abs(diff): return False if not perform_correction and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: return False while not is_valid: tx_type_choice = choice(['debit', 'credit']) txs_candidates = list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice) if len(txs_candidates) > 0: tx = choice(list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice)) if any([diff > 0 and tx_type_choice == 'debit', diff < 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION elif any([diff < 0 and tx_type_choice == 'debit', diff > 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount -= settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data) return True def validate_io_date(dt: Union[str, date, datetime], no_parse_localdate: bool = True) -> Optional[datetime]: if not dt: return if isinstance(dt, date): dt = make_aware( value=datetime.combine( dt, datetime.min.time() )) return dt elif isinstance(dt, datetime): if is_naive(dt): return make_aware(dt) return dt elif isinstance(dt, str): # try to parse a date object from string... fdt = parse_date(dt) if not fdt: # try to parse a datetime object from string... fdt = parse_datetime(dt) if not fdt: raise InvalidDateInputError( message=f'Could not parse date from {dt}' ) elif is_naive(fdt): fdt = make_aware(fdt) return fdt if no_parse_localdate: return localtime() def validate_dates( from_date: Union[str, datetime, date] = None, to_date: Union[str, datetime, date] = None) -> Tuple[date, date]: from_date = validate_io_date(from_date, no_parse_localdate=False) to_date = validate_io_date(to_date) return from_date, to_date def validate_activity(activity: str, raise_404: bool = False): # idea: move to model???... JournalEntryModel = lazy_loader.get_journal_entry_model() valid = activity in JournalEntryModel.VALID_ACTIVITIES if activity and not valid: exception = ValidationError(f'{activity} is invalid. 
Choices are {JournalEntryModel.VALID_ACTIVITIES}.') if raise_404: raise Http404(exception) raise exception return activity class IOValidationError(ValidationError): pass class IODatabaseMixIn: """ Controls how transactions are recorded into the ledger. """ def is_entity_model(self): return isinstance(self, lazy_loader.get_entity_model()) def is_ledger_model(self): return isinstance(self, lazy_loader.get_ledger_model()) def is_entity_unit_model(self): return isinstance(self, lazy_loader.get_unit_model()) def get_entity_model_from_io(self): if self.is_entity_model(): return self elif self.is_ledger_model(): return self.entity elif self.is_entity_unit_model(): return self.entity # def is_time_bounded(self, from_date, to_date): def database_digest(self, txs_queryset: QuerySet, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, from_date: date = None, to_date: date = None, activity: str = None, role: str = None, accounts: str or List[str] or Set[str] = None, posted: bool = True, exclude_zero_bal: bool = True, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, by_unit: bool = False, **kwargs): if settings.DJANGO_LEDGER_USE_CLOSING_ENTRIES: if not from_date: entity_model = self.get_entity_model_from_io() closing_entry_date = entity_model.select_closing_entry_for_io_date(to_date=to_date) # print(closing_entry_date) # # if closing_entry_date: # closing_entry_list = entity_model.get_closing_entry_cache_for_date( # closing_date=closing_entry_date, # force_cache_update=True # ) # from_date_d = closing_entry_date + timedelta(days=1) # print('Orig From:', from_date) # print('New from:', from_date_d) # print('To Date:', to_date) # print(closing_entry_list) if not txs_queryset: TransactionModel = lazy_loader.get_txs_model() if self.is_entity_model(): if entity_slug: if entity_slug != self.slug: raise IOValidationError('Inconsistent entity_slug. 
' f'Provided {entity_slug} does not match actual {self.slug}') if unit_slug: txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug or self.slug, unit_slug=unit_slug ) else: txs_queryset = TransactionModel.objects.for_entity( user_model=user_model, entity_slug=self ) elif self.is_ledger_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Ledger Model requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_ledger( user_model=user_model, entity_slug=entity_slug, ledger_model=self ) elif self.is_entity_unit_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Entity Unit requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug, unit_slug=unit_slug or self ) else: txs_queryset = TransactionModel.objects.none() txs_queryset = txs_queryset.not_closing_entry() if exclude_zero_bal: txs_queryset = txs_queryset.filter(amount__gt=0) if posted: txs_queryset = txs_queryset.posted() if from_date: txs_queryset = txs_queryset.from_date(from_date=from_date) if to_date: txs_queryset = txs_queryset.to_date(to_date=to_date) if accounts: if not isinstance(accounts, str): accounts = [accounts] txs_queryset = txs_queryset.for_accounts(account_list=accounts) if activity: if isinstance(activity, str): activity = [activity] txs_queryset = txs_queryset.for_activity(activity_list=activity) if role: txs_queryset = txs_queryset.for_roles(role_list=role) VALUES = [ 'account__uuid', 'account__balance_type', 'tx_type', 'account__code', 'account__name', 'account__role', ] ANNOTATE = {'balance': Sum('amount')} ORDER_BY = ['account__uuid'] if by_unit: ORDER_BY.append('journal_entry__entity_unit__uuid') VALUES += ['journal_entry__entity_unit__uuid', 'journal_entry__entity_unit__name'] if by_period: ORDER_BY.append('journal_entry__timestamp') ANNOTATE['dt_idx'] = TruncMonth('journal_entry__timestamp') if by_activity: ORDER_BY.append('journal_entry__activity') VALUES.append('journal_entry__activity') if by_tx_type: ORDER_BY.append('tx_type') VALUES.append('tx_type') return txs_queryset.values(*VALUES).annotate(**ANNOTATE).order_by(*ORDER_BY) def python_digest(self, txs_queryset: Optional[QuerySet] = None, user_model: Optional[UserModel] = None, to_date: date = None, from_date: date = None, equity_only: bool = False, activity: str = None, entity_slug: str = None, unit_slug: str = None, role: Optional[Union[Set[str], List[str]]] = None, accounts: Optional[Union[Set[str], List[str]]] = None, signs: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, **kwargs) -> list or tuple: if equity_only: role = roles_module.GROUP_EARNINGS txs_queryset = self.database_digest( user_model=user_model, txs_queryset=txs_queryset, to_date=to_date, from_date=from_date, entity_slug=entity_slug, unit_slug=unit_slug, activity=activity, role=role, accounts=accounts, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, by_period=by_period, **kwargs) for tx_model in txs_queryset: if tx_model['account__balance_type'] != tx_model['tx_type']: tx_model['balance'] = -tx_model['balance'] # txs_list = list(txs_queryset) # txs_list.sort(key=lambda a: ( # a['account__uuid'], # str(a.get('journal_entry__entity_unit__uuid', '')) if by_unit else '', # a['dt_idx'].year if by_period else 0, # a['dt_idx'].month if by_period else 0, # str(a['journal_entry__activity']) if by_activity else None, # 
a['tx_type'] if by_tx_type else '', # )) accounts_gb_code = groupby(txs_queryset, key=lambda a: ( a['account__uuid'], a.get('journal_entry__entity_unit__uuid') if by_unit else None, a.get('dt_idx').year if by_period else None, a.get('dt_idx').month if by_period else None, a.get('journal_entry__activity') if by_activity else None, a.get('tx_type') if by_tx_type else None, )) gb_digest = [self.aggregate_balances(k, g) for k, g in accounts_gb_code] for acc in gb_digest: acc['balance_abs'] = abs(acc['balance']) if signs: TransactionModel = lazy_loader.get_txs_model() for acc in gb_digest: if any([ all([acc['role_bs'] == roles_module.BS_ASSET_ROLE, acc['balance_type'] == TransactionModel.CREDIT]), all([acc['role_bs'] in ( roles_module.BS_LIABILITIES_ROLE, roles_module.BS_EQUITY_ROLE ), acc['balance_type'] == TransactionModel.DEBIT]) ]): acc['balance'] = -acc['balance'] return txs_queryset, gb_digest @staticmethod def aggregate_balances(k, g): gl = list(g) return { 'account_uuid': k[0], 'unit_uuid': k[1], 'unit_name': gl[0].get('journal_entry__entity_unit__name'), 'activity': gl[0].get('journal_entry__activity'), 'period_year': k[2], 'period_month': k[3], 'role_bs': roles_module.BS_ROLES.get(gl[0]['account__role']), 'role': gl[0]['account__role'], 'code': gl[0]['account__code'], 'name': gl[0]['account__name'], 'balance_type': gl[0]['account__balance_type'], 'tx_type': k[5], 'balance': sum(a['balance'] for a in gl), } def digest(self, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, txs_queryset: QuerySet = None, as_io_digest: bool = False, accounts: Optional[Union[Set[str], List[str]]] = None, role: Optional[Union[Set[str], List[str]]] = None, activity: str = None, signs: bool = True, to_date: Union[str, datetime, date] = None, from_date: Union[str, datetime, date] = None, process_roles: bool = False, process_groups: bool = False, process_ratios: bool = False, process_activity: bool = False, equity_only: bool = False, by_period: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, digest_name: str = None, balance_sheet_statement: bool = False, income_statement: bool = False, cash_flow_statement: bool = False, **kwargs) -> Union[Tuple, IODigestContextManager]: if balance_sheet_statement: from_date = None if cash_flow_statement: by_activity = True if activity: activity = validate_activity(activity) if role: role = roles_module.validate_roles(role) from_date, to_date = validate_dates(from_date, to_date) io_data = defaultdict(lambda: dict()) io_data['io_model'] = self io_data['from_date'] = from_date io_data['to_date'] = to_date io_data['by_unit'] = by_unit io_data['by_period'] = by_period io_data['by_activity'] = by_activity io_data['by_tx_type'] = by_tx_type txs_qs, accounts_digest = self.python_digest( txs_queryset=txs_queryset, user_model=user_model, accounts=accounts, role=role, activity=activity, entity_slug=entity_slug, unit_slug=unit_slug, to_date=to_date, from_date=from_date, signs=signs, equity_only=equity_only, by_period=by_period, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, **kwargs ) io_data['txs_qs'] = txs_qs io_data['accounts'] = accounts_digest if process_roles: roles_mgr = RoleContextManager( io_data=io_data, by_period=by_period, by_unit=by_unit ) # idea: change digest() name to something else? maybe aggregate, calculate?... 
io_data = roles_mgr.digest() if any([ process_groups, balance_sheet_statement, income_statement, cash_flow_statement ]): group_mgr = GroupContextManager( io_data=io_data, by_period=by_period, by_unit=by_unit ) io_data = group_mgr.digest() # todo: migrate this to group manager... io_data['group_account']['GROUP_ASSETS'].sort( key=lambda acc: roles_module.ROLES_ORDER_ASSETS.index(acc['role'])) io_data['group_account']['GROUP_LIABILITIES'].sort( key=lambda acc: roles_module.ROLES_ORDER_LIABILITIES.index(acc['role'])) io_data['group_account']['GROUP_CAPITAL'].sort( key=lambda acc: roles_module.ROLES_ORDER_CAPITAL.index(acc['role'])) if process_ratios: ratio_gen = FinancialRatioManager(io_data=io_data) io_data = ratio_gen.digest() if process_activity: activity_manager = ActivityContextManager(io_data=io_data, by_unit=by_unit, by_period=by_period) activity_manager.digest() if balance_sheet_statement:
balance_sheet_mgr = BalanceSheetStatementContextManager(io_data=io_data)
7
2023-10-20 01:07:20+00:00
24k
acolas1/KGSimple
simplify.py
[ { "identifier": "FluencyScorer", "path": "scoring/fluency_scorer.py", "snippet": "class FluencyScorer:\n def __init__(self, batch_size=1, reduce=\"mean\", log=True, laplace_smooth=False, prob_dict_path=None):\n self.device = \"cuda:1\" if torch.cuda.is_available() else \"cpu\"\n self.batch_size = batch_size\n self.reduce = reduce\n self.log = log\n self.laplace_smooth = laplace_smooth\n self.tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n self.scorer = LMScorer.from_pretrained(\"gpt2\", device=self.device, batch_size=batch_size)\n self.idf_df = pd.read_csv(prob_dict_path, ',', encoding='utf-8')\n self.freq_dict = pd.Series((self.idf_df.frequency.values), index=self.idf_df.token).to_dict()\n self.num_tokens = self.idf_df.total.values[0] \n \n def unigram_score(self, sentences):\n if self.freq_dict is None:\n raise Exception(\"Probability dictionary is not defined.\") \n unigram_scores = []\n for sent in sentences:\n unigram_prob = 1\n for token in word_tokenize(sent.lower()):\n if token in self.freq_dict:\n if self.laplace_smooth:\n curr_unigram_prob = (self.freq_dict[token]+1)/(self.num_tokens+len(self.freq_dict))\n else:\n curr_unigram_prob = self.freq_dict[token]/self.num_tokens\n \n \n\n else:\n if self.laplace_smooth:\n curr_unigram_prob = (1/(self.num_tokens+len(self.freq_dict)))\n else:\n curr_unigram_prob = 1\n # unigram_prob += curr_unigram_prob\n \n \n if self.log:\n unigram_prob +=np.log(curr_unigram_prob)\n else:\n unigram_prob *= curr_unigram_prob\n uni_score = unigram_prob/len(word_tokenize(sent))\n unigram_scores.append(uni_score)\n return unigram_scores\n \n def SLOR_score(self, sentence_list, lm_score, unigram_score):\n SLOR_scores = []\n for i in range(len(sentence_list)):\n SLOR_score = lm_score[i]-unigram_score[i]\n if self.log:\n SLOR_score = math.exp(lm_score[i]-unigram_score[i])\n SLOR_scores.append(SLOR_score)\n return SLOR_scores\n \n def score_batched(self, generated_texts, source_texts=None, printing=False, **kwargs):\n sources_SLOR_score, generateds_SLOR_score = None, None\n if source_texts:\n sources_lm_prob_scores = self.scorer.sentence_score(source_texts, reduce=self.reduce, log=self.log)\n sources_unigram_scores = self.unigram_score(source_texts)\n sources_SLOR_score = self.SLOR_score(source_texts, sources_lm_prob_scores, sources_unigram_scores)\n\n\n\n generateds_lm_prob_scores = self.scorer.sentence_score(generated_texts, reduce=self.reduce, log=self.log)\n generateds_unigram_scores = self.unigram_score(generated_texts)\n generateds_SLOR_score = self.SLOR_score(generated_texts, generateds_lm_prob_scores, generateds_unigram_scores)\n \n if printing:\n print(\"[source_sents]\", source_texts)\n print(\"[source_lm]\", sources_lm_prob_scores)\n print(\"[source_unigram]\", sources_unigram_scores)\n print(\"[source_scores]\", sources_SLOR_score)\n print(\"[generated_sents]\", generated_texts)\n print(\"[generated_lm]\", generateds_lm_prob_scores)\n print(\"[generated_unigram]\", generateds_unigram_scores)\n print(\"[generated_scores]\", generateds_SLOR_score)\n return {\"scores\": generateds_SLOR_score, \"source_scores\": sources_SLOR_score}\n\n def score(self, generated_text, source_text=None, printing=False, **kwargs):\n # sources_lm_prob_score = scorer.sentence_score(source_list, reduce=\"mean\")\n \n sources_SLOR_score, generateds_SLOR_score = None, None\n if source_text:\n source_list = [source_text]\n sources_lm_prob_scores = self.scorer.sentence_score(source_list, reduce=self.reduce, log=self.log)\n sources_unigram_scores = 
self.unigram_score(source_list)\n sources_SLOR_score = self.SLOR_score(source_list, sources_lm_prob_scores, sources_unigram_scores)\n \n \n \n generateds_list = [generated_text]\n generateds_lm_prob_scores = self.scorer.sentence_score(generateds_list, reduce=self.reduce, log=self.log)\n generateds_unigram_scores = self.unigram_score(generateds_list)\n generateds_SLOR_score = self.SLOR_score(generateds_list, generateds_lm_prob_scores, generateds_unigram_scores)\n \n if printing:\n print(\"[source_sents]\", source_text)\n print(\"[source_lm]\", sources_lm_prob_scores)\n print(\"[source_unigram]\", sources_unigram_scores)\n print(\"[source_scores]\", sources_SLOR_score)\n print(\"[generated_sents]\", generated_text)\n print(\"[generated_lm]\", generateds_lm_prob_scores)\n print(\"[generated_unigram]\", generateds_unigram_scores)\n print(\"[generated_scores]\", generateds_SLOR_score)\n return {\"scores\": generateds_SLOR_score, \"source_scores\": sources_SLOR_score}" }, { "identifier": "SaliencyBERTScore", "path": "scoring/saliency_scorer.py", "snippet": "class SaliencyBERTScore:\n def __init__(self, lmscorer = \"bertscore\", lang=\"en\"):\n self.bertscore = evaluate.load(lmscorer)\n self.lang = lang\n\n\n def calc_BERT_score(self, predictions, references, sigmoid):\n results = self.bertscore.compute(predictions=predictions, references=references, lang=self.lang)\n if sigmoid:\n results = expit(results)\n return results\n\n def score_batched(self, generated_text, source_text=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = None, None\n bert_score = self.calc_BERT_score(generated_text, source_text, sigmoid)\n f1 = bert_score['f1']\n \n if printing:\n print(\"scores: \", str(f1))\n return {\"scores\": f1}\n\n def score(self, generated_text, source_text=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = None, None\n bert_score = self.calc_BERT_score([generated_text], [source_text], sigmoid)\n f1 = bert_score['f1']\n \n if printing:\n print(\"scores: \", str(f1))\n return {\"scores\": f1}" }, { "identifier": "SimplicityTextScore", "path": "scoring/simplicity_scorer.py", "snippet": "class SimplicityTextScore:\n def __init__(self):\n pass\n\n def calc_FRE(self, text, sigmoid):\n min_val = -30\n score = textstat.flesch_reading_ease(text)\n scaled_score = (score - min_val) / (121.22 - min_val)\n # Clamp scaled_score to the range [0, 1]\n scaled_score = max(0, min(scaled_score, 1))\n \n if sigmoid:\n scaled_score = expit(scaled_score)\n \n return scaled_score\n \n \n \n def calc_FKGL(self, text, sigmoid):\n score = max(0,textstat.flesch_kincaid_grade(text))\n if sigmoid:\n score = expit(score)\n return score\n\n def score_batched(self, generated_texts, source_texts=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = [],[]\n \n for text in generated_texts:\n gen_score.append(self.calc_FRE(text, sigmoid))\n \n \n if source_texts:\n for text in source_texts:\n source_score.append(self.calc_FRE(text, sigmoid))\n \n if printing:\n print(\"score: \", gen_score)\n print(\"source_score: \", source_score)\n return {\"scores\": gen_score, \"source_scores\": source_score}\n \n def score(self, generated_text, source_text=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = None, None\n \n gen_score = self.calc_FRE(generated_text, sigmoid)\n \n if source_text:\n source_score = self.calc_FRE(source_text, sigmoid)\n \n if printing:\n print(\"score: \", gen_score)\n print(\"source_score: \", source_score)\n return 
{\"scores\": gen_score, \"source_scores\": source_score}" }, { "identifier": "ScorerWrapper", "path": "scoring/aggregate_scorer.py", "snippet": "class ScorerWrapper:\n def __init__(self, scorers, scoring_method=\"logsum\", batch_size=1):\n assert scoring_method in [\"product\", \"logsum\"], \"Unrecognized `scoring_method`\"\n \n self.scorers = scorers\n self.scoring_method = scoring_method\n\n # if self.scoring_method == \"logsum\":\n # self.score_func = logsum_score\n # elif self.scoring_method == \"product\":\n # self.score_func = product_score\n \n if batch_size > 1:\n exec(\"self.score_func = {}\".format(self.scoring_method+\"_\"+\"score_batched\"))\n else:\n exec(\"self.score_func = {}\").format(self.scoring_method+\"_\"+\"score\")\n self.batch_size = batch_size\n def get_score_names(self):\n return [s[\"name\"] for s in self.scorers]\n \n def score_batched(self, input_texts=None, generated_texts=None, old_kgs=None, new_kgs=None, dels_ents=None, partial=False, printing=False, timings=False, extras={}, progress=False):\n assert len(input_texts) == len(generated_texts) == len(old_kgs) == len(new_kgs) == len(dels_ents), \"Data lengths don't match\"\n \n data_list = []\n for inp, gen, old_kg, new_kg, del_ents in zip(input_texts, generated_texts, old_kgs, new_kgs, dels_ents):\n data_list.append({\"inp\": inp, \"gen\": gen, \"old_kg\": old_kg, \"new_kg\": new_kg, \"del_ents\": del_ents})\n\n if len(data_list) == 0:\n progress = False\n \n for batch in batcher(data_list, batch_size=self.batch_size, progress=progress):\n batch_inputs = [instance_dict[\"inp\"] for instance_dict in batch]\n batch_gens = [instance_dict[\"gen\"] for instance_dict in batch]\n batch_old_kgs = [instance_dict[\"old_kg\"] for instance_dict in batch]\n batch_new_kgs = [instance_dict[\"new_kg\"] for instance_dict in batch]\n batch_dels_ents = [instance_dict[\"del_ents\"] for instance_dict in batch]\n batch_scores = self.score_func(self.scorers, batch_inputs, batch_gens, batch_old_kgs, batch_new_kgs, batch_dels_ents)\n for score_type, scores in batch_scores.items():\n if type(scores) in [torch.Tensor, np.array, np.ndarray]:\n batch_scores[score_type] = scores.tolist()\n\n if printing:\n print(\"[total]\", all_outputs[\"total_scores\"])\n return batch_scores\n \n def score(self, input_text=None, generated_text=None, old_kg=None, new_kg=None, del_ents=None):\n aggregate_score = self.score_func(self.scorers, input_text, generated_text, old_kg, new_kg, del_ents)\n return aggregate_score\n \n\n def __call__(self, graphs, input_text, generated_text, **kwargs):\n return self.score(graphs, input_text, generated_text, **kwargs)" }, { "identifier": "GAPDataloader", "path": "GAP/data_relations_as_nodes.py", "snippet": "class GAPDataloader(DataLoader):\n\n def __init__(self, args, dataset, mode):\n if mode == \"train\":\n sampler = RandomSampler(dataset)\n batch_size = args.train_batch_size\n else:\n sampler = SequentialSampler(dataset)\n batch_size = args.predict_batch_size\n super(GAPDataloader, self).__init__(dataset, sampler=sampler, batch_size=batch_size,\n num_workers=args.num_workers)" }, { "identifier": "EventDataset", "path": "GAP/data_relations_as_nodes.py", "snippet": "class EventDataset(Dataset):\n def __init__(self, logger, args, data, tokenizer, mode):\n self.data = data\n self.tokenizer = tokenizer\n self.topology = {\"entity-entity\": args.entity_entity, \n \"entity-relation\": args.entity_relation,\n \"relation-entity\": args.relation_entity,\n \"relation-relation\": args.relation_relation\n } \n \n \n \n 
print(\"Total samples = {}\".format(len(self.data)))\n\n \n assert type(self.data) == list\n self.args = args\n self.data_type = mode\n self.metric = \"BLEU\"\n self.head_ids, self.rel_ids, self.tail_ids = self.tokenizer.encode(' [head]', add_special_tokens=False), \\\n self.tokenizer.encode(' [relation]', add_special_tokens=False), \\\n self.tokenizer.encode(' [tail]', add_special_tokens=False)\n self.graph_ids, self.text_ids = self.tokenizer.encode(' [graph]', add_special_tokens=False), \\\n self.tokenizer.encode(' [text]', add_special_tokens=False)\n\n if self.args.model_name == \"bart\":\n self.mask_token = self.tokenizer.mask_token\n self.mask_token_id = self.tokenizer.mask_token_id\n else:\n self.mask_token = self.tokenizer.additional_special_tokens[0]\n self.mask_token_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.additional_special_tokens[0])\n\n if self.args.model_name == \"bart\":\n if self.args.append_another_bos:\n self.add_bos_id = [self.tokenizer.bos_token_id] * 2\n else:\n self.add_bos_id = [self.tokenizer.bos_token_id]\n else:\n self.add_bos_id = []\n\n def __len__(self):\n return len(self.data)\n \n def graph_size(self,idx):\n entry = self.data[idx]\n kg = entry[0]\n \n kg_list = []\n triple_list = kg.split('<S>')\n triple_list = [triple_list[0]] + ['<S>'+triple for triple in triple_list[1:]]\n triple_list = list(filter(None,triple_list))\n for triple in triple_list:\n head = re.search('<S>(.*)<P>', triple).group(1).strip()\n rel = re.search('<P>(.*)<O>', triple).group(1).strip()\n tail = re.search('<O>(.*)', triple).group(1).strip()\n kg_list.append([head,rel,tail])\n \n \n\n strings_label = []\n node_ids = []\n edge_ids = []\n strings_label_tokens = ''\n\n \n text_entity, text_relation = self.get_all_entities_per_sample(kg_list)\n entity_change, relation_change = self.get_change_per_sample(text_entity, text_relation)\n return len(entity_change)\n\n def graph_linearize(self, triple, entity_change, head_ids, rel_ids, tail_ids,\n relation_change, cnt_edge, adj_matrix):\n # string_label: encoder ids\n # string_label_tokens: encoder tokens\n if len(triple[0]) == 0:\n return [], '', [], [], cnt_edge, adj_matrix\n nodes, edges = [], []\n string_label = copy.deepcopy(head_ids)\n string_label_tokens = ' <S>'\n nodes.extend([-1] * len(string_label))\n edges.extend([-1] * len(string_label))\n\n\n string_label += entity_change[triple[0]][0]\n string_label_tokens += ' {}'.format(triple[0])\n nodes.extend([entity_change[triple[0]][1]] * len(entity_change[triple[0]][0]))\n edges.extend([-1] * len(entity_change[triple[0]][0]))\n\n\n if len(triple[1]) != 0 and len(triple[2]) != 0:\n rel_label = relation_change[triple[1]]\n rel_ent_label = entity_change[triple[1]][1]\n rel_label_token = copy.deepcopy(triple[1])\n words_label = rel_ids + rel_label + tail_ids + entity_change[triple[2]][0]\n words_label_tokens = ' <P> {} <O> {}'.format(rel_label_token, triple[2])\n nodes.extend(\n ([-1] * len(rel_ids)) + ([entity_change[triple[1]][1]] * len(rel_label)) + ([-1] * len(tail_ids)) + ([entity_change[triple[2]][1]] * len(\n entity_change[triple[2]][0])))\n edges.extend([-1] * len(rel_ids) + [cnt_edge] * len(rel_label) + [-1] * (\n len(tail_ids) + len(entity_change[triple[2]][0])))\n if entity_change[triple[0]][1] < len(adj_matrix) and entity_change[triple[2]][1] < len(adj_matrix):\n\n\n if self.topology['entity-entity']:\n adj_matrix[entity_change[triple[0]][1]][entity_change[triple[2]][1]] = 1\n adj_matrix[entity_change[triple[2]][1]][entity_change[triple[0]][1]] = 1\n\n if 
self.topology['entity-relation']:\n adj_matrix[entity_change[triple[0]][1]][entity_change[triple[1]][1]] = 2\n adj_matrix[entity_change[triple[2]][1]][entity_change[triple[1]][1]] = 2\n\n if self.topology['relation-entity']:\n adj_matrix[entity_change[triple[1]][1]][entity_change[triple[0]][1]] = 3\n adj_matrix[entity_change[triple[2]][1]][entity_change[triple[1]][1]] = 3\n \n if not self.topology['relation-entity'] and not self.topology['relation-relation']:\n adj_matrix[entity_change[triple[1]][1]][entity_change[triple[1]][1]] = 10\n\n if not self.topology['entity-relation'] and not self.topology['entity-entity']:\n adj_matrix[entity_change[triple[0]][1]][entity_change[triple[0]][1]] = 10\n adj_matrix[entity_change[triple[2]][1]][entity_change[triple[2]][1]] = 10\n\n cnt_edge += 1\n string_label += words_label\n string_label_tokens += words_label_tokens\n\n assert len(string_label) == len(nodes) == len(edges)\n\n return string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix\n\n def relation_to_relation_fill(self, node_dict, rel_dict, adj_matrix):\n adj_matrix_temp = np.array(adj_matrix)\n rel_idx_list = []\n for rel in rel_dict.keys():\n rel_idx = node_dict[rel][1]\n rel_idx_list.append(rel_idx)\n adj_matrix_np = np.array(adj_matrix)\n adj_matrix_np_bool = (adj_matrix_np==-1)\n #reassign -1s to 0s\n adj_matrix_np[adj_matrix_np_bool] = 0\n #get squared matrix for r-r\n adj_matrix_sq = adj_matrix_np@adj_matrix_np\n \n #old adj_matrix + squared matrix only r-r\n rel_idx_list = np.array(rel_idx_list, dtype=np.intp)\n adj_matrix_temp[rel_idx_list[:,np.newaxis], rel_idx_list] = (adj_matrix_sq[rel_idx_list][:,rel_idx_list] > 0)*4\n adj_matrix_new = adj_matrix_temp.tolist()\n \n return adj_matrix_new\n \n def get_all_entities_per_sample(self, triple_list):\n text_entity = set()\n text_relation = set()\n for triple in triple_list:\n if len(triple[0]) == 0:\n continue\n if len(triple[1]) != 0 and len(triple[2]) != 0:\n text_relation.add(triple[1])\n text_entity.add(triple[0])\n text_entity.add(triple[2])\n \n text_entity_list = list(text_entity)+list(text_relation)\n text_relation_list = list(text_relation)\n \n return text_entity_list, text_relation_list\n\n def get_change_per_sample(self, text_entity, text_relation):\n # during fine-tuning, we don't mask entities or relations\n ent_change = {}\n total_entity = text_entity\n\n for ent_id in range(len(total_entity)):\n entity_toks = self.tokenizer.encode(\" {}\".format(total_entity[ent_id]), add_special_tokens=False)\n ent_change[total_entity[ent_id]] = [entity_toks, ent_id]\n \n # relation change only includes the relation tokens and ids\n rel_change = {}\n for rel_id in range(len(text_relation)):\n rel_change[text_relation[rel_id]] = self.tokenizer.encode(' {}'.format(text_relation[rel_id]),\n add_special_tokens=False)\n\n return ent_change, rel_change\n\n def truncate_pair_ar(self, a, add_bos_id, graph_ids, text_ids, node_ids, edge_ids):\n # add_bos_id + graph_ids + a + text_ids + b + eos_token_id\n length_a_b = self.args.max_input_length - len(add_bos_id) - len(graph_ids) - len(text_ids) - 1\n if len(a) > length_a_b:\n a = a[:length_a_b]\n node_ids = node_ids[:length_a_b]\n edge_ids = edge_ids[:length_a_b]\n input_ids = add_bos_id + graph_ids + a + text_ids + [self.tokenizer.eos_token_id]\n input_node_ids = [-1] * (len(add_bos_id) + len(graph_ids)) + node_ids + [-1] * (len(text_ids) + 1)\n input_edge_ids = [-1] * (len(add_bos_id) + len(graph_ids)) + edge_ids + [-1] * (len(text_ids) + 1)\n attn_mask = [1] * len(input_ids) + [0] * 
(self.args.max_input_length - len(input_ids))\n input_ids += [self.tokenizer.pad_token_id] * (self.args.max_input_length - len(input_ids))\n input_node_ids += [-1] * (self.args.max_input_length - len(input_node_ids))\n input_edge_ids += [-1] * (self.args.max_input_length - len(input_edge_ids))\n assert len(input_ids) == len(attn_mask) == self.args.max_input_length == len(input_node_ids) == len(\n input_edge_ids)\n return input_ids, attn_mask, input_node_ids, input_edge_ids\n\n \n def ar_prep_data(self, questions, add_bos_id, graph_ids, text_ids, node_ids, edge_ids):\n input_ids, input_attn_mask, input_node_ids, input_edge_ids = self.truncate_pair_ar(questions, add_bos_id,\n graph_ids, text_ids,\n node_ids, edge_ids)\n\n return input_ids, input_attn_mask, input_node_ids, input_edge_ids\n\n\n\n def __getitem__(self, idx):\n kg = self.data[idx]\n # print(\"KG: \", kg)\n kg_list = []\n triple_list = kg.split('<S>')\n triple_list = [triple_list[0]] + ['<S>'+triple for triple in triple_list[1:]]\n triple_list = list(filter(None,triple_list))\n for triple in triple_list:\n head = re.search('<S>(.*)<P>', triple).group(1).strip()\n rel = re.search('<P>(.*)<O>', triple).group(1).strip()\n tail = re.search('<O>(.*)', triple).group(1).strip()\n kg_list.append([head,rel,tail])\n \n strings_label = []\n node_ids = []\n edge_ids = []\n strings_label_tokens = ''\n\n # print(\"kg_list: \", kg_list)\n text_entity, text_relation = self.get_all_entities_per_sample(kg_list)\n entity_change, relation_change = self.get_change_per_sample(text_entity, text_relation)\n adj_matrix = [[-1] * (self.args.max_node_length + 1) for _ in range(self.args.max_node_length + 1)]\n\n cnt_edge = 0\n\n for i, triple in enumerate(kg_list):\n string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix = self.graph_linearize(\n triple,\n entity_change,\n self.head_ids,\n self.rel_ids, self.tail_ids,\n relation_change, cnt_edge, adj_matrix)\n \n strings_label += string_label\n strings_label_tokens += string_label_tokens\n node_ids += nodes\n edge_ids += edges\n if self.topology['relation-relation']:\n adj_matrix = self.relation_to_relation_fill(entity_change, relation_change, adj_matrix)\n \n words_label_ids, words_label_tokens, words_input_ids, words_input_tokens = [], '', [], ''\n# current_text = entry[1]\n \n# for word in current_text.split():\n# word_label_ids = self.tokenizer.encode(\" {}\".format(word), add_special_tokens=False)\n# word_label_tokens = copy.deepcopy(word)\n\n# words_label_ids += word_label_ids\n# words_label_tokens += ' ' + word_label_tokens\n # print(\"strings_label: \", strings_label)\n # print(\"node_ids: \", node_ids)\n # print(\"edge_ids: \", edge_ids)\n # print(\"self.add_bos_id: \", self.add_bos_id)\n # print(\"self.graph_ids: \", self.graph_ids)\n input_ids_ar, attn_mask_ar, input_node_ids_ar, input_edge_ids_ar = \\\n self.ar_prep_data(strings_label, self.add_bos_id, self.graph_ids,\n self.text_ids, node_ids, edge_ids)\n node_length_ar = max(input_node_ids_ar) + 1\n edge_length_ar = max(input_edge_ids_ar) + 1\n \n\n def masked_fill(src, masked_value, fill_value):\n return [src[src_id] if src[src_id] != masked_value and src[src_id] < fill_value else fill_value for src_id\n in range(len(src))]\n\n input_node_ids_ar, input_edge_ids_ar = masked_fill(input_node_ids_ar, -1, self.args.max_node_length), \\\n masked_fill(input_edge_ids_ar, -1, self.args.max_edge_length)\n\n def masked_fill_matrix(adj_matrix_input, masked_value, fill_value):\n adj_matrix_tmp = copy.deepcopy(adj_matrix_input)\n for a_id 
in range(len(adj_matrix_tmp)):\n for b_id in range(len(adj_matrix_tmp)):\n if adj_matrix_tmp[a_id][b_id] == masked_value or adj_matrix_tmp[a_id][b_id] > fill_value:\n adj_matrix_tmp[a_id][b_id] = fill_value\n return adj_matrix_tmp\n\n adj_matrix_ar = masked_fill_matrix(adj_matrix, -1, self.args.max_edge_length)\n\n assert len(input_ids_ar) == len(attn_mask_ar) == self.args.max_input_length == len(input_node_ids_ar) == len(\n input_edge_ids_ar)\n\n input_ids_ar = torch.LongTensor(input_ids_ar)\n attn_mask_ar = torch.LongTensor(attn_mask_ar)\n \n input_node_ids_ar = torch.LongTensor(input_node_ids_ar)\n input_edge_ids_ar = torch.LongTensor(input_edge_ids_ar)\n node_length_ar = torch.LongTensor([node_length_ar])\n edge_length_ar = torch.LongTensor([edge_length_ar])\n adj_matrix_ar = torch.LongTensor(adj_matrix_ar)\n \n return input_ids_ar, attn_mask_ar, input_node_ids_ar, node_length_ar, adj_matrix_ar" }, { "identifier": "WebNLGDataset", "path": "GAP/data_relations_as_nodes.py", "snippet": "class WebNLGDataset(Dataset):\n def __init__(self, logger, args, data_path, tokenizer, mode):\n self.data_path = data_path\n self.tokenizer = tokenizer\n self.topology = {\"entity-entity\": args.entity_entity, \n \"entity-relation\": args.entity_relation,\n \"relation-entity\": args.relation_entity,\n \"relation-relation\": args.relation_relation\n } \n \n with open(self.data_path + '.json', 'r') as f:\n self.data = json.load(f)\n\n print(\"Total samples = {}\".format(len(self.data)))\n\n assert type(self.data) == list\n assert all([\"id\" in d for d in self.data]), self.data[0].keys()\n if type(self.data[0][\"id\"]) == int:\n for i in range(len(self.data)):\n self.data[i][\"id\"] = str(self.data[i][\"id\"])\n\n self.args = args\n self.data_type = mode\n self.metric = \"BLEU\"\n\n self.head_ids, self.rel_ids, self.tail_ids = self.tokenizer.encode(' [head]', add_special_tokens=False), \\\n self.tokenizer.encode(' [relation]', add_special_tokens=False), \\\n self.tokenizer.encode(' [tail]', add_special_tokens=False)\n\n self.graph_ids, self.text_ids = self.tokenizer.encode(' [graph]', add_special_tokens=False), \\\n self.tokenizer.encode(' [text]', add_special_tokens=False)\n\n if self.args.model_name == \"bart\":\n self.mask_token = self.tokenizer.mask_token\n self.mask_token_id = self.tokenizer.mask_token_id\n else:\n self.mask_token = self.tokenizer.additional_special_tokens[0]\n self.mask_token_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.additional_special_tokens[0])\n\n if self.args.model_name == \"bart\":\n if self.args.append_another_bos:\n self.add_bos_id = [self.tokenizer.bos_token_id] * 2\n else:\n self.add_bos_id = [self.tokenizer.bos_token_id]\n else:\n self.add_bos_id = []\n\n def __len__(self):\n return len(self.data)\n\n def linearize_v2(self, entity, entity_change, head_ids, rel_ids, tail_ids,\n relation_change, cnt_edge, adj_matrix):\n # string_label: encoder ids\n # string_label_tokens: encoder tokens\n\n if len(entity[0]) == 0:\n return [], '', [], [], cnt_edge, adj_matrix\n nodes, edges = [], []\n string_label = copy.deepcopy(head_ids)\n string_label_tokens = ' [head]'\n nodes.extend([-1] * len(string_label))\n edges.extend([-1] * len(string_label))\n\n\n string_label += entity_change[entity[0]][0]\n string_label_tokens += ' {}'.format(entity[0])\n nodes.extend([entity_change[entity[0]][1]] * len(entity_change[entity[0]][0]))\n edges.extend([-1] * len(entity_change[entity[0]][0]))\n\n\n for rel in entity[2]:\n if len(rel[0]) != 0 and len(rel[1]) != 0:\n rel_label = 
relation_change[rel[0]]\n rel_ent_label = entity_change[rel[0]][1]\n rel_label_token = copy.deepcopy(rel[0])\n words_label = rel_ids + rel_label + tail_ids + entity_change[rel[1]][0]\n words_label_tokens = ' [relation] {} [tail] {}'.format(rel_label_token, rel[1])\n nodes.extend(\n ([-1] * len(rel_ids)) + ([entity_change[rel[0]][1]] * len(rel_label)) + ([-1] * len(tail_ids)) + ([entity_change[rel[1]][1]] * len(\n entity_change[rel[1]][0])))\n\n \n edges.extend([-1] * len(rel_ids) + [cnt_edge] * len(rel_label) + [-1] * (\n len(tail_ids) + len(entity_change[rel[1]][0])))\n if entity_change[entity[0]][1] < len(adj_matrix) and entity_change[rel[1]][1] < len(adj_matrix):\n if self.topology['entity-entity']:\n adj_matrix[entity_change[entity[0]][1]][entity_change[rel[1]][1]] = 1\n adj_matrix[entity_change[rel[1]][1]][entity_change[entity[0]][1]] = 1\n\n if self.topology['entity-relation']:\n adj_matrix[entity_change[entity[0]][1]][entity_change[rel[0]][1]] = 2\n adj_matrix[entity_change[rel[1]][1]][entity_change[rel[0]][1]] = 2\n \n if self.topology['relation-entity']:\n adj_matrix[entity_change[rel[0]][1]][entity_change[entity[0]][1]] = 3\n adj_matrix[entity_change[rel[0]][1]][entity_change[rel[1]][1]] = 3\n \n if not self.topology['relation-entity'] and not self.topology['relation-relation']:\n adj_matrix[entity_change[rel[0]][1]][entity_change[rel[0]][1]] = 10\n \n if not self.topology['entity-relation'] and not self.topology['entity-entity']:\n adj_matrix[entity_change[entity[0]][1]][entity_change[entity[0]][1]] = 10\n adj_matrix[entity_change[rel[1]][1]][entity_change[rel[1]][1]] = 10\n\n cnt_edge += 1\n string_label += words_label\n string_label_tokens += words_label_tokens\n\n assert len(string_label) == len(nodes) == len(edges)\n\n return string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix\n\n \n def relation_to_relation_fill(self, node_dict, rel_dict, adj_matrix):\n adj_matrix_temp = np.array(adj_matrix)\n rel_idx_list = []\n for rel in rel_dict.keys():\n rel_idx = node_dict[rel][1]\n rel_idx_list.append(rel_idx)\n adj_matrix_np = np.array(adj_matrix)\n adj_matrix_np_bool = (adj_matrix_np==-1)\n #reassign -1s to 0s\n adj_matrix_np[adj_matrix_np_bool] = 0\n #get squared matrix for r-r\n adj_matrix_sq = adj_matrix_np@adj_matrix_np\n \n #old adj_matrix + squared matrix only r-r\n rel_idx_list = np.array(rel_idx_list, dtype=np.intp)\n adj_matrix_temp[rel_idx_list[:,np.newaxis], rel_idx_list] = (adj_matrix_sq[rel_idx_list][:,rel_idx_list] > 0)*4\n adj_matrix_new = adj_matrix_temp.tolist()\n \n return adj_matrix_new\n \n \n def get_all_entities_per_sample(self, mark_entity_number, mark_entity, entry):\n text_entity = set()\n text_relation = set()\n for entity_id in mark_entity_number:\n entity = entry['kbs'][entity_id]\n if len(entity[0]) == 0:\n continue\n for rel in entity[2]:\n if len(rel[0]) != 0 and len(rel[1]) != 0:\n text_relation.add(rel[0])\n text_entity.add(rel[1])\n\n text_entity_list = list(text_entity)+list(text_relation)\n text_relation_list = list(text_relation)\n for entity_ele in mark_entity:\n if entity_ele in text_entity_list:\n text_entity_list.remove(entity_ele)\n \n return text_entity_list, text_relation_list\n\n def get_change_per_sample(self, mark_entity, text_entity, text_relation):\n # during fine-tuning, we don't mask entities or relations\n ent_change = {}\n total_entity = mark_entity + text_entity\n\n for ent_id in range(len(total_entity)):\n entity_toks = self.tokenizer.encode(\" {}\".format(total_entity[ent_id]), add_special_tokens=False)\n 
ent_change[total_entity[ent_id]] = [entity_toks, ent_id]\n # relation change only includes the relation tokens and ids\n rel_change = {}\n for rel_id in range(len(text_relation)):\n rel_change[text_relation[rel_id]] = self.tokenizer.encode(' {}'.format(text_relation[rel_id]),\n add_special_tokens=False)\n return ent_change, rel_change\n\n def truncate_pair_ar(self, a, add_bos_id, graph_ids, text_ids, node_ids, edge_ids):\n # add_bos_id + graph_ids + a + text_ids + b + eos_token_id\n length_a_b = self.args.max_input_length - len(add_bos_id) - len(graph_ids) - len(text_ids) - 1\n if len(a) > length_a_b:\n a = a[:length_a_b]\n node_ids = node_ids[:length_a_b]\n edge_ids = edge_ids[:length_a_b]\n input_ids = add_bos_id + graph_ids + a + text_ids + [self.tokenizer.eos_token_id]\n input_node_ids = [-1] * (len(add_bos_id) + len(graph_ids)) + node_ids + [-1] * (len(text_ids) + 1)\n input_edge_ids = [-1] * (len(add_bos_id) + len(graph_ids)) + edge_ids + [-1] * (len(text_ids) + 1)\n attn_mask = [1] * len(input_ids) + [0] * (self.args.max_input_length - len(input_ids))\n input_ids += [self.tokenizer.pad_token_id] * (self.args.max_input_length - len(input_ids))\n input_node_ids += [-1] * (self.args.max_input_length - len(input_node_ids))\n input_edge_ids += [-1] * (self.args.max_input_length - len(input_edge_ids))\n assert len(input_ids) == len(attn_mask) == self.args.max_input_length == len(input_node_ids) == len(\n input_edge_ids)\n return input_ids, attn_mask, input_node_ids, input_edge_ids\n\n def ar_prep_data(self, questions, add_bos_id, graph_ids, text_ids, node_ids, edge_ids):\n input_ids, input_attn_mask, input_node_ids, input_edge_ids = self.truncate_pair_ar(questions, add_bos_id,\n graph_ids, text_ids,\n node_ids, edge_ids)\n\n return input_ids, input_attn_mask, input_node_ids, input_edge_ids\n \n\n\n def __getitem__(self, idx):\n\n entry = self.data[idx]\n\n entities = []\n for _ in entry['kbs']:\n entities.append(_)\n\n strings_label = []\n node_ids = []\n edge_ids = []\n strings_label_tokens = ''\n\n # mark_entity: entities with KB numbers which are important for this task\n # text_entity: entities without KB numbers but only with text, which are less important\n mark_entity = [entry['kbs'][ele_entity][0] for ele_entity in entities]\n mark_entity_number = entities\n text_entity, text_relation = self.get_all_entities_per_sample(mark_entity_number, mark_entity, entry)\n entity_change, relation_change = self.get_change_per_sample(mark_entity, text_entity, text_relation)\n total_entity = mark_entity + text_entity\n adj_matrix = [[-1] * (self.args.max_node_length + 1) for _ in range(self.args.max_node_length + 1)]\n\n cnt_edge = 0\n\n if 'title' in entry:\n entity = self.knowledge[entry['title_kb_id']]\n string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix = self.linearize_v2(\n entity,\n entity_change,\n self.head_ids,\n self.rel_ids, self.tail_ids,\n relation_change, cnt_edge, adj_matrix)\n\n strings_label += string_label\n strings_label_tokens += string_label_tokens\n\n for i, entity_id in enumerate(entities):\n entity = entry['kbs'][entity_id]\n string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix = self.linearize_v2(\n entity,\n entity_change,\n self.head_ids,\n self.rel_ids, self.tail_ids,\n relation_change, cnt_edge, adj_matrix)\n \n strings_label += string_label\n strings_label_tokens += string_label_tokens\n node_ids += nodes\n edge_ids += edges\n \n if self.topology['relation-relation']:\n adj_matrix = self.relation_to_relation_fill(entity_change, 
relation_change, adj_matrix)\n \n\n words_label_ids, words_label_tokens, words_input_ids, words_input_tokens = [], '', [], ''\n\n\n input_ids_ar, attn_mask_ar, input_node_ids_ar, input_edge_ids_ar = \\\n self.ar_prep_data(strings_label, self.add_bos_id, self.graph_ids,\n self.text_ids, node_ids, edge_ids)\n\n node_length_ar = max(input_node_ids_ar) + 1\n edge_length_ar = max(input_edge_ids_ar) + 1\n \n\n def masked_fill(src, masked_value, fill_value):\n return [src[src_id] if src[src_id] != masked_value and src[src_id] < fill_value else fill_value for src_id\n in range(len(src))]\n\n input_node_ids_ar, input_edge_ids_ar = masked_fill(input_node_ids_ar, -1, self.args.max_node_length), \\\n masked_fill(input_edge_ids_ar, -1, self.args.max_edge_length)\n\n def masked_fill_matrix(adj_matrix_input, masked_value, fill_value):\n adj_matrix_tmp = copy.deepcopy(adj_matrix_input)\n for a_id in range(len(adj_matrix_tmp)):\n for b_id in range(len(adj_matrix_tmp)):\n if adj_matrix_tmp[a_id][b_id] == masked_value or adj_matrix_tmp[a_id][b_id] > fill_value:\n adj_matrix_tmp[a_id][b_id] = fill_value\n return adj_matrix_tmp\n\n adj_matrix_ar = masked_fill_matrix(adj_matrix, -1, self.args.max_edge_length)\n\n assert len(input_ids_ar) == len(attn_mask_ar) == self.args.max_input_length == len(input_node_ids_ar) == len(\n input_edge_ids_ar)\n\n input_ids_ar = torch.LongTensor(input_ids_ar)\n attn_mask_ar = torch.LongTensor(attn_mask_ar)\n \n input_node_ids_ar = torch.LongTensor(input_node_ids_ar)\n input_edge_ids_ar = torch.LongTensor(input_edge_ids_ar)\n node_length_ar = torch.LongTensor([node_length_ar])\n edge_length_ar = torch.LongTensor([edge_length_ar])\n adj_matrix_ar = torch.LongTensor(adj_matrix_ar)\n \n return input_ids_ar, attn_mask_ar, input_node_ids_ar, node_length_ar, adj_matrix_ar" }, { "identifier": "evaluate_bleu", "path": "GAP/data_relations_as_nodes.py", "snippet": "def evaluate_bleu(data_ref, data_sys):\n coco_eval = run_coco_eval(data_ref, data_sys)\n scores = {metric: score for metric, score in list(coco_eval.eval.items())}\n return scores[\"Bleu_4\"]" }, { "identifier": "get_t_emb_dim", "path": "GAP/data_relations_as_nodes.py", "snippet": "def get_t_emb_dim(args):\n t_emb_dim = int(args.entity_entity)+int(args.entity_relation)\\\n +int(args.relation_entity)+int(args.relation_relation)+1\n return t_emb_dim" }, { "identifier": "GAPBartForConditionalGeneration", "path": "GAP/modeling_gap_type.py", "snippet": "class GAPBartForConditionalGeneration(BartForConditionalGeneration):\n def __init__(self, config, **kwargs):\n super().__init__(config)\n base_model = GAPBartModel(config,**kwargs)\n self.model = base_model\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n \n def forward(self, input_ids, attention_mask=None, encoder_outputs=None,\n decoder_input_ids=None, decoder_attention_mask=None, input_node_ids=None,\n node_length=None, adj_matrix=None, decoder_whole_ids=None, decoder_cached_states=None,\n use_cache=False, is_training=False):\n\n if is_training:\n _decoder_input_ids = shift_tokens_right(decoder_input_ids, self.config.pad_token_id)\n else:\n _decoder_input_ids = decoder_input_ids\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n encoder_outputs=encoder_outputs,\n decoder_input_ids=_decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n input_node_ids=input_node_ids,\n node_length=node_length,\n adj_matrix=adj_matrix,\n decoder_cached_states=decoder_cached_states,\n use_cache=use_cache,\n )\n 
lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)\n if is_training:\n loss_fct = nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)\n loss = loss_fct(lm_logits.view(-1, self.config.vocab_size),\n decoder_input_ids.view(-1))\n return loss\n return (lm_logits, ) + outputs[1:]\n\n @torch.no_grad()\n def generate(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n max_length: Optional[int] = None,\n min_length: Optional[int] = None,\n do_sample: Optional[bool] = None,\n early_stopping: Optional[bool] = None,\n num_beams: Optional[int] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n repetition_penalty: Optional[float] = None,\n bad_words_ids: Optional[Iterable[int]] = None,\n bos_token_id: Optional[int] = None,\n pad_token_id: Optional[int] = None,\n eos_token_id: Optional[int] = None,\n length_penalty: Optional[float] = None,\n no_repeat_ngram_size: Optional[int] = None,\n num_return_sequences: Optional[int] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n input_node_ids=None,\n node_length=None,\n adj_matrix=None,\n decoder_start_token_id: Optional[int] = None,\n use_cache: Optional[bool] = None,\n **model_specific_kwargs\n ) -> torch.LongTensor:\n r\"\"\" Generates sequences for models with a LM head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.\n\n Adapted in part from `Facebook's XLM beam search code`_.\n\n .. _`Facebook's XLM beam search code`:\n https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529\n\n\n Parameters:\n\n input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`\n The sequence used as a prompt for the generation. If `None` the method initializes\n it as an empty `torch.LongTensor` of shape `(1,)`.\n\n max_length: (`optional`) int\n The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20.\n\n min_length: (`optional`) int\n The min length of the sequence to be generated. Between 0 and infinity. Default to 0.\n\n do_sample: (`optional`) bool\n If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.\n\n early_stopping: (`optional`) bool\n if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.\n\n num_beams: (`optional`) int\n Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.\n\n temperature: (`optional`) float\n The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.\n\n top_k: (`optional`) int\n The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.\n\n top_p: (`optional`) float\n The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.\n\n repetition_penalty: (`optional`) float\n The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.\n\n pad_token_id: (`optional`) int\n Padding token. Default to specicic model pad_token_id or None if it does not exist.\n\n bos_token_id: (`optional`) int\n BOS token. 
Defaults to `bos_token_id` as defined in the models config.\n\n eos_token_id: (`optional`) int\n EOS token. Defaults to `eos_token_id` as defined in the models config.\n\n length_penalty: (`optional`) float\n Exponential penalty to the length. Default to 1.\n\n no_repeat_ngram_size: (`optional`) int\n If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.\n bad_words_ids: (`optional`) list of lists of int\n `bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.\n\n num_return_sequences: (`optional`) int\n The number of independently computed returned sequences for each element in the batch. Default to 1.\n\n attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n Defaults to `None`.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n decoder_start_token_id=None: (`optional`) int\n Start token id for the decoder. Defaults to ``decoder_start_token_id`` as defined the model's config or to the ``bos_token_id``\n if no ``decoder_start_token_id`` is found in the config.\n This is only relevant for encoder-decoder models.\n\n use_cache: (`optional`) bool\n If `use_cache` is True, past key values are used to speed up decoding if applicable to model. Defaults to `True`.\n\n model_specific_kwargs: (`optional`) dict\n Additional model specific kwargs will be forwarded to the `forward` function of the model.\n\n Return:\n\n output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`\n sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`\n\n Examples::\n\n from transformers import AutoTokenizer, AutoModelForCausalLM\n\n tokenizer = AutoTokenizer. 
('distilgpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n outputs = model.generate(max_length=40) # do greedy decoding\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # 3 generate sequences using by sampling\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('ctrl') # Download model and configuration from S3 and cache.\n input_context = 'Legal My neighbor is' # \"Legal\" is one of the control codes for ctrl\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('gpt2') # Download model and configuration from S3 and cache.\n input_context = 'My cute dog' # \"Legal\" is one of the control codes for ctrl\n bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated\n \"\"\"\n\n # We cannot generate if the model does not have a LM head\n if self.get_output_embeddings() is None:\n raise AttributeError(\n \"You tried to generate sequences with a model that does not have a LM Head.\"\n \"Please use another model class (e.g. 
`OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )\"\n )\n\n max_length = max_length if max_length is not None else self.config.max_length\n min_length = min_length if min_length is not None else self.config.min_length\n do_sample = do_sample if do_sample is not None else self.config.do_sample\n early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n num_beams = num_beams if num_beams is not None else self.config.num_beams\n temperature = temperature if temperature is not None else self.config.temperature\n top_k = top_k if top_k is not None else self.config.top_k\n top_p = top_p if top_p is not None else self.config.top_p\n repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty\n bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id\n pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id\n eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id\n length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty\n no_repeat_ngram_size = (\n no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size\n )\n bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids\n num_return_sequences = (\n num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences\n )\n decoder_start_token_id = (\n decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id\n )\n\n if input_ids is not None:\n batch_size = input_ids.shape[0] # overriden by the input batch_size\n else:\n batch_size = 1\n\n assert isinstance(max_length, int) and max_length > 0, \"`max_length` should be a strictly positive integer.\"\n assert isinstance(min_length, int) and min_length >= 0, \"`min_length` should be a positive integer.\"\n assert isinstance(do_sample, bool), \"`do_sample` should be a boolean.\"\n assert isinstance(early_stopping, bool), \"`early_stopping` should be a boolean.\"\n assert isinstance(use_cache, bool), \"`use_cache` should be a boolean.\"\n assert isinstance(num_beams, int) and num_beams > 0, \"`num_beams` should be a strictly positive integer.\"\n assert temperature > 0, \"`temperature` should be strictly positive.\"\n assert isinstance(top_k, int) and top_k >= 0, \"`top_k` should be a positive integer.\"\n assert 0 <= top_p <= 1, \"`top_p` should be between 0 and 1.\"\n assert repetition_penalty >= 1.0, \"`repetition_penalty` should be >= 1.\"\n assert input_ids is not None or (\n isinstance(bos_token_id, int) and bos_token_id >= 0\n ), \"If input_ids is not defined, `bos_token_id` should be a positive integer.\"\n assert pad_token_id is None or (\n isinstance(pad_token_id, int) and (pad_token_id >= 0)\n ), \"`pad_token_id` should be a positive integer.\"\n assert (eos_token_id is None) or (\n isinstance(eos_token_id, int) and (eos_token_id >= 0)\n ), \"`eos_token_id` should be a positive integer.\"\n assert length_penalty > 0, \"`length_penalty` should be strictly positive.\"\n assert (\n isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0\n ), \"`no_repeat_ngram_size` should be a positive integer.\"\n assert (\n 
isinstance(num_return_sequences, int) and num_return_sequences > 0\n ), \"`num_return_sequences` should be a strictly positive integer.\"\n assert (\n bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)\n ), \"`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated\"\n\n if input_ids is None:\n assert isinstance(bos_token_id, int) and bos_token_id >= 0, (\n \"you should either supply a context to complete as `input_ids` input \"\n \"or a `bos_token_id` (integer >= 0) as a first token to start the generation.\"\n )\n input_ids = torch.full(\n (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,\n )\n else:\n assert input_ids.dim() == 2, \"Input prompt should be of shape (batch_size, sequence length).\"\n\n # not allow to duplicate outputs when greedy decoding\n if do_sample is False:\n if num_beams == 1:\n # no_beam_search greedy generation conditions\n assert (\n num_return_sequences == 1\n ), \"Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1\"\n\n else:\n # beam_search greedy generation conditions\n assert (\n num_beams >= num_return_sequences\n ), \"Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences\"\n\n # create attention mask if necessary\n # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140\n if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):\n attention_mask = input_ids.ne(pad_token_id).long()\n elif attention_mask is None:\n attention_mask = input_ids.new_ones(input_ids.shape)\n\n # set pad_token_id to eos_token_id if not set. 
Important that this is done after\n # attention_mask is created\n if pad_token_id is None and eos_token_id is not None:\n logger.warning(\n \"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence\".format(eos_token_id)\n )\n pad_token_id = eos_token_id\n\n # current position and vocab size\n if hasattr(self.config, \"vocab_size\"):\n vocab_size = self.config.vocab_size\n elif (\n self.config.is_encoder_decoder\n and hasattr(self.config, \"decoder\")\n and hasattr(self.config.decoder, \"vocab_size\")\n ):\n vocab_size = self.config.decoder.vocab_size\n\n # set effective batch size and effective batch multiplier according to do_sample\n if do_sample:\n effective_batch_size = batch_size * num_return_sequences\n effective_batch_mult = num_return_sequences\n else:\n effective_batch_size = batch_size\n effective_batch_mult = 1\n\n if self.config.is_encoder_decoder:\n if decoder_start_token_id is None:\n decoder_start_token_id = bos_token_id\n\n assert (\n decoder_start_token_id is not None\n ), \"decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation\"\n assert hasattr(self, \"get_encoder\"), \"{} should have a 'get_encoder' function defined\".format(self)\n assert callable(self.get_encoder), \"{} should be a method\".format(self.get_encoder)\n\n # get encoder and store encoder outputs\n encoder = self.get_encoder()\n\n # add structural information when encoding\n encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask, input_node_ids=input_node_ids,\n node_length=node_length, adj_matrix=adj_matrix)\n\n # Expand input ids if num_beams > 1 or num_return_sequences > 1\n if num_return_sequences > 1 or num_beams > 1:\n input_ids_len = input_ids.shape[-1]\n input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)\n attention_mask = attention_mask.unsqueeze(1).expand(\n batch_size, effective_batch_mult * num_beams, input_ids_len\n )\n\n input_ids = input_ids.contiguous().view(\n effective_batch_size * num_beams, input_ids_len\n ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)\n attention_mask = attention_mask.contiguous().view(\n effective_batch_size * num_beams, input_ids_len\n ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)\n\n if self.config.is_encoder_decoder:\n # create empty decoder_input_ids\n input_ids = torch.full(\n (effective_batch_size * num_beams, 1),\n decoder_start_token_id,\n dtype=torch.long,\n device=next(self.parameters()).device,\n )\n cur_len = 1\n\n assert (\n batch_size == encoder_outputs[0].shape[0]\n ), f\"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} \"\n\n # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)\n expanded_batch_idxs = (\n torch.arange(batch_size)\n .view(-1, 1)\n .repeat(1, num_beams * effective_batch_mult)\n .view(-1)\n .to(input_ids.device)\n )\n # expand encoder_outputs\n encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])\n\n else:\n encoder_outputs = None\n cur_len = input_ids.shape[-1]\n\n if num_beams > 1:\n output = self._generate_beam_search(\n input_ids,\n cur_len=cur_len,\n max_length=max_length,\n min_length=min_length,\n do_sample=do_sample,\n early_stopping=early_stopping,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=no_repeat_ngram_size,\n 
bad_words_ids=bad_words_ids,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n batch_size=effective_batch_size,\n num_return_sequences=num_return_sequences,\n length_penalty=length_penalty,\n num_beams=num_beams,\n vocab_size=vocab_size,\n encoder_outputs=encoder_outputs,\n attention_mask=attention_mask,\n use_cache=use_cache,\n model_specific_kwargs=model_specific_kwargs,\n )\n else:\n output = self._generate_no_beam_search(\n input_ids,\n cur_len=cur_len,\n max_length=max_length,\n min_length=min_length,\n do_sample=do_sample,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=no_repeat_ngram_size,\n bad_words_ids=bad_words_ids,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n batch_size=effective_batch_size,\n encoder_outputs=encoder_outputs,\n attention_mask=attention_mask,\n use_cache=use_cache,\n model_specific_kwargs=model_specific_kwargs,\n )\n\n return output" }, { "identifier": "GAPBartForConditionalGeneration", "path": "GAP/modeling_gap.py", "snippet": "class GAPBartForConditionalGeneration(BartForConditionalGeneration):\n def __init__(self, config):\n super().__init__(config)\n base_model = GAPBartModel(config)\n self.model = base_model\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n\n def forward(self, input_ids, attention_mask=None, encoder_outputs=None,\n decoder_input_ids=None, decoder_attention_mask=None, input_node_ids=None, \n node_length=None, adj_matrix=None, decoder_whole_ids=None, decoder_cached_states=None,\n use_cache=False, is_training=False):\n\n if is_training:\n _decoder_input_ids = shift_tokens_right(decoder_input_ids, self.config.pad_token_id)\n else:\n _decoder_input_ids = decoder_input_ids\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n encoder_outputs=encoder_outputs,\n decoder_input_ids=_decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n input_node_ids=input_node_ids,\n node_length=node_length,\n adj_matrix=adj_matrix,\n decoder_cached_states=decoder_cached_states,\n use_cache=use_cache,\n )\n lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)\n if is_training:\n loss_fct = nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)\n loss = loss_fct(lm_logits.view(-1, self.config.vocab_size),\n decoder_input_ids.view(-1))\n return loss\n return (lm_logits, ) + outputs[1:]\n\n @torch.no_grad()\n def generate(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n max_length: Optional[int] = None,\n min_length: Optional[int] = None,\n do_sample: Optional[bool] = None,\n early_stopping: Optional[bool] = None,\n num_beams: Optional[int] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n repetition_penalty: Optional[float] = None,\n bad_words_ids: Optional[Iterable[int]] = None,\n bos_token_id: Optional[int] = None,\n pad_token_id: Optional[int] = None,\n eos_token_id: Optional[int] = None,\n length_penalty: Optional[float] = None,\n no_repeat_ngram_size: Optional[int] = None,\n num_return_sequences: Optional[int] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n input_node_ids=None,\n node_length=None,\n adj_matrix=None,\n decoder_start_token_id: Optional[int] = None,\n use_cache: Optional[bool] = None,\n **model_specific_kwargs\n ) -> torch.LongTensor:\n r\"\"\" Generates sequences for models with a LM head. 
The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.\n\n Adapted in part from `Facebook's XLM beam search code`_.\n\n .. _`Facebook's XLM beam search code`:\n https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529\n\n\n Parameters:\n\n input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`\n The sequence used as a prompt for the generation. If `None` the method initializes\n it as an empty `torch.LongTensor` of shape `(1,)`.\n\n max_length: (`optional`) int\n The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20.\n\n min_length: (`optional`) int\n The min length of the sequence to be generated. Between 0 and infinity. Default to 0.\n\n do_sample: (`optional`) bool\n If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.\n\n early_stopping: (`optional`) bool\n if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.\n\n num_beams: (`optional`) int\n Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.\n\n temperature: (`optional`) float\n The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.\n\n top_k: (`optional`) int\n The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.\n\n top_p: (`optional`) float\n The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.\n\n repetition_penalty: (`optional`) float\n The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.\n\n pad_token_id: (`optional`) int\n Padding token. Default to specicic model pad_token_id or None if it does not exist.\n\n bos_token_id: (`optional`) int\n BOS token. Defaults to `bos_token_id` as defined in the models config.\n\n eos_token_id: (`optional`) int\n EOS token. Defaults to `eos_token_id` as defined in the models config.\n\n length_penalty: (`optional`) float\n Exponential penalty to the length. Default to 1.\n\n no_repeat_ngram_size: (`optional`) int\n If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.\n bad_words_ids: (`optional`) list of lists of int\n `bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.\n\n num_return_sequences: (`optional`) int\n The number of independently computed returned sequences for each element in the batch. Default to 1.\n\n attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n Defaults to `None`.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n decoder_start_token_id=None: (`optional`) int\n Start token id for the decoder. 
Defaults to ``decoder_start_token_id`` as defined the model's config or to the ``bos_token_id``\n if no ``decoder_start_token_id`` is found in the config.\n This is only relevant for encoder-decoder models.\n\n use_cache: (`optional`) bool\n If `use_cache` is True, past key values are used to speed up decoding if applicable to model. Defaults to `True`.\n\n model_specific_kwargs: (`optional`) dict\n Additional model specific kwargs will be forwarded to the `forward` function of the model.\n\n Return:\n\n output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`\n sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`\n\n Examples::\n\n from transformers import AutoTokenizer, AutoModelForCausalLM\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n outputs = model.generate(max_length=40) # do greedy decoding\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # 3 generate sequences using by sampling\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('ctrl') # Download model and configuration from S3 and cache.\n input_context = 'Legal My neighbor is' # \"Legal\" is one of the control codes for ctrl\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('gpt2') # Download model and configuration from S3 and cache.\n input_context = 'My cute dog' # \"Legal\" is one of the control codes for ctrl\n bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = 
model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated\n \"\"\"\n\n # We cannot generate if the model does not have a LM head\n if self.get_output_embeddings() is None:\n raise AttributeError(\n \"You tried to generate sequences with a model that does not have a LM Head.\"\n \"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )\"\n )\n\n max_length = max_length if max_length is not None else self.config.max_length\n min_length = min_length if min_length is not None else self.config.min_length\n do_sample = do_sample if do_sample is not None else self.config.do_sample\n early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n num_beams = num_beams if num_beams is not None else self.config.num_beams\n temperature = temperature if temperature is not None else self.config.temperature\n top_k = top_k if top_k is not None else self.config.top_k\n top_p = top_p if top_p is not None else self.config.top_p\n repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty\n bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id\n pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id\n eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id\n length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty\n no_repeat_ngram_size = (\n no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size\n )\n bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids\n num_return_sequences = (\n num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences\n )\n decoder_start_token_id = (\n decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id\n )\n\n if input_ids is not None:\n batch_size = input_ids.shape[0] # overriden by the input batch_size\n else:\n batch_size = 1\n\n assert isinstance(max_length, int) and max_length > 0, \"`max_length` should be a strictly positive integer.\"\n assert isinstance(min_length, int) and min_length >= 0, \"`min_length` should be a positive integer.\"\n assert isinstance(do_sample, bool), \"`do_sample` should be a boolean.\"\n assert isinstance(early_stopping, bool), \"`early_stopping` should be a boolean.\"\n assert isinstance(use_cache, bool), \"`use_cache` should be a boolean.\"\n assert isinstance(num_beams, int) and num_beams > 0, \"`num_beams` should be a strictly positive integer.\"\n assert temperature > 0, \"`temperature` should be strictly positive.\"\n assert isinstance(top_k, int) and top_k >= 0, \"`top_k` should be a positive integer.\"\n assert 0 <= top_p <= 1, \"`top_p` should be between 0 and 1.\"\n assert repetition_penalty >= 1.0, \"`repetition_penalty` should be >= 1.\"\n assert input_ids is not None or (\n isinstance(bos_token_id, int) and bos_token_id >= 0\n ), \"If input_ids is not defined, `bos_token_id` should be a positive integer.\"\n assert pad_token_id is None or (\n isinstance(pad_token_id, int) and (pad_token_id >= 0)\n ), \"`pad_token_id` should be a 
positive integer.\"\n assert (eos_token_id is None) or (\n isinstance(eos_token_id, int) and (eos_token_id >= 0)\n ), \"`eos_token_id` should be a positive integer.\"\n assert length_penalty > 0, \"`length_penalty` should be strictly positive.\"\n assert (\n isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0\n ), \"`no_repeat_ngram_size` should be a positive integer.\"\n assert (\n isinstance(num_return_sequences, int) and num_return_sequences > 0\n ), \"`num_return_sequences` should be a strictly positive integer.\"\n assert (\n bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)\n ), \"`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated\"\n\n if input_ids is None:\n assert isinstance(bos_token_id, int) and bos_token_id >= 0, (\n \"you should either supply a context to complete as `input_ids` input \"\n \"or a `bos_token_id` (integer >= 0) as a first token to start the generation.\"\n )\n input_ids = torch.full(\n (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,\n )\n else:\n assert input_ids.dim() == 2, \"Input prompt should be of shape (batch_size, sequence length).\"\n\n # not allow to duplicate outputs when greedy decoding\n if do_sample is False:\n if num_beams == 1:\n # no_beam_search greedy generation conditions\n assert (\n num_return_sequences == 1\n ), \"Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1\"\n\n else:\n # beam_search greedy generation conditions\n assert (\n num_beams >= num_return_sequences\n ), \"Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences\"\n\n # create attention mask if necessary\n # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140\n if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):\n attention_mask = input_ids.ne(pad_token_id).long()\n elif attention_mask is None:\n attention_mask = input_ids.new_ones(input_ids.shape)\n\n # set pad_token_id to eos_token_id if not set. 
Important that this is done after\n # attention_mask is created\n if pad_token_id is None and eos_token_id is not None:\n logger.warning(\n \"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence\".format(eos_token_id)\n )\n pad_token_id = eos_token_id\n\n # current position and vocab size\n if hasattr(self.config, \"vocab_size\"):\n vocab_size = self.config.vocab_size\n elif (\n self.config.is_encoder_decoder\n and hasattr(self.config, \"decoder\")\n and hasattr(self.config.decoder, \"vocab_size\")\n ):\n vocab_size = self.config.decoder.vocab_size\n\n # set effective batch size and effective batch multiplier according to do_sample\n if do_sample:\n effective_batch_size = batch_size * num_return_sequences\n effective_batch_mult = num_return_sequences\n else:\n effective_batch_size = batch_size\n effective_batch_mult = 1\n\n if self.config.is_encoder_decoder:\n if decoder_start_token_id is None:\n decoder_start_token_id = bos_token_id\n\n assert (\n decoder_start_token_id is not None\n ), \"decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation\"\n assert hasattr(self, \"get_encoder\"), \"{} should have a 'get_encoder' function defined\".format(self)\n assert callable(self.get_encoder), \"{} should be a method\".format(self.get_encoder)\n\n # get encoder and store encoder outputs\n encoder = self.get_encoder()\n\n # add structural information when encoding\n encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask, input_node_ids=input_node_ids,\n node_length=node_length, adj_matrix=adj_matrix)\n\n # Expand input ids if num_beams > 1 or num_return_sequences > 1\n if num_return_sequences > 1 or num_beams > 1:\n input_ids_len = input_ids.shape[-1]\n input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)\n attention_mask = attention_mask.unsqueeze(1).expand(\n batch_size, effective_batch_mult * num_beams, input_ids_len\n )\n\n input_ids = input_ids.contiguous().view(\n effective_batch_size * num_beams, input_ids_len\n ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)\n attention_mask = attention_mask.contiguous().view(\n effective_batch_size * num_beams, input_ids_len\n ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)\n\n if self.config.is_encoder_decoder:\n # create empty decoder_input_ids\n input_ids = torch.full(\n (effective_batch_size * num_beams, 1),\n decoder_start_token_id,\n dtype=torch.long,\n device=next(self.parameters()).device,\n )\n cur_len = 1\n\n assert (\n batch_size == encoder_outputs[0].shape[0]\n ), f\"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} \"\n\n # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)\n expanded_batch_idxs = (\n torch.arange(batch_size)\n .view(-1, 1)\n .repeat(1, num_beams * effective_batch_mult)\n .view(-1)\n .to(input_ids.device)\n )\n # expand encoder_outputs\n encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])\n\n else:\n encoder_outputs = None\n cur_len = input_ids.shape[-1]\n\n if num_beams > 1:\n output = self._generate_beam_search(\n input_ids,\n cur_len=cur_len,\n max_length=max_length,\n min_length=min_length,\n do_sample=do_sample,\n early_stopping=early_stopping,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=no_repeat_ngram_size,\n 
bad_words_ids=bad_words_ids,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n batch_size=effective_batch_size,\n num_return_sequences=num_return_sequences,\n length_penalty=length_penalty,\n num_beams=num_beams,\n vocab_size=vocab_size,\n encoder_outputs=encoder_outputs,\n attention_mask=attention_mask,\n use_cache=use_cache,\n model_specific_kwargs=model_specific_kwargs,\n )\n else:\n output = self._generate_no_beam_search(\n input_ids,\n cur_len=cur_len,\n max_length=max_length,\n min_length=min_length,\n do_sample=do_sample,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=no_repeat_ngram_size,\n bad_words_ids=bad_words_ids,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n batch_size=effective_batch_size,\n encoder_outputs=encoder_outputs,\n attention_mask=attention_mask,\n use_cache=use_cache,\n model_specific_kwargs=model_specific_kwargs,\n )\n\n return output" } ]
import os import json import numpy as np import pandas as pd import torch import random from collections import defaultdict from transformers import BartTokenizer, T5Tokenizer from transformers import AdamW, get_linear_schedule_with_warmup from utils import * from scoring.fluency_scorer import FluencyScorer from scoring.saliency_scorer import SaliencyBERTScore from scoring.simplicity_scorer import SimplicityTextScore from scoring.guardrails import * from scoring.aggregate_scorer import ScorerWrapper from GAP.data_relations_as_nodes import GAPDataloader, EventDataset, WebNLGDataset from GAP.data_relations_as_nodes import evaluate_bleu, get_t_emb_dim from tqdm import tqdm, trange from rake_nltk import Rake from evaluate import load from sentence_similarity import sentence_similarity from GAP.modeling_gap_type import GAPBartForConditionalGeneration as GAP_Type_model from GAP.modeling_gap import GAPBartForConditionalGeneration as GAP_model
21465
# import yake bertscore = load("bertscore") ## sentence model for merge phrase_model = sentence_similarity(model_name='distilbert-base-uncased',embedding_type='cls_token_embedding') ## for sentence checking ner_check = NERInaccuracyPenalty() def run(args, logger): #load in model for graph-to-text and tokenizer checkpoint = args.model_path tokenizer_path = args.tokenizer_path tokenizer = BartTokenizer.from_pretrained(tokenizer_path) n_gpu = torch.cuda.device_count() if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if args.type_encoding: t_emb_dim = get_t_emb_dim(args) model = GAP_Type_model.from_pretrained(checkpoint,t_emb_dim=t_emb_dim) else: model = GAP_model.from_pretrained(checkpoint) if torch.cuda.is_available(): model.to(torch.device("cuda")) # Here let's put all the scorers and make a "score" function for each. scores = [{"name": "fluency", "model": FluencyScorer(1, log=True, laplace_smooth=True, prob_dict_path="data/wiki/enwiki/enwiki_terms_with_punc.csv"), "sign": 1, "weight": 1.0},
# import yake bertscore = load("bertscore") ## sentence model for merge phrase_model = sentence_similarity(model_name='distilbert-base-uncased',embedding_type='cls_token_embedding') ## for sentence checking ner_check = NERInaccuracyPenalty() def run(args, logger): #load in model for graph-to-text and tokenizer checkpoint = args.model_path tokenizer_path = args.tokenizer_path tokenizer = BartTokenizer.from_pretrained(tokenizer_path) n_gpu = torch.cuda.device_count() if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if args.type_encoding: t_emb_dim = get_t_emb_dim(args) model = GAP_Type_model.from_pretrained(checkpoint,t_emb_dim=t_emb_dim) else: model = GAP_model.from_pretrained(checkpoint) if torch.cuda.is_available(): model.to(torch.device("cuda")) # Here let's put all the scorers and make a "score" function for each. scores = [{"name": "fluency", "model": FluencyScorer(1, log=True, laplace_smooth=True, prob_dict_path="data/wiki/enwiki/enwiki_terms_with_punc.csv"), "sign": 1, "weight": 1.0},
{"name": "simple_text_score", "model": SimplicityTextScore(), "sign": 1, "weight": 1.0},
2
2023-10-24 13:24:23+00:00
24k
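Note on the record above: its cropped_code builds a list of scorer configurations, each a dict with a name, a model object, a sign, and a weight, and its import block pulls in ScorerWrapper from scoring.aggregate_scorer to combine them. The wrapper's real API is not reproduced in this dump, so the following is only a minimal sketch, under assumptions, of how such sign/weight fields could be aggregated; LengthScorer and aggregate are hypothetical stand-ins and do not appear in the dataset or the source repository.

# Minimal sketch (NOT the repo's ScorerWrapper): combining per-scorer outputs
# using the "sign" and "weight" fields seen in the `scores` list above.
# LengthScorer and the sum(sign * weight * score) rule are assumptions.
from typing import Dict, List


class LengthScorer:
    """Toy stand-in for a scorer object exposing .score(text) -> float."""

    def score(self, text: str) -> float:
        # Word count as a trivially computable placeholder score.
        return float(len(text.split()))


def aggregate(scorers: List[Dict], text: str) -> float:
    """Hypothetical aggregation: sum each scorer's sign * weight * raw score."""
    total = 0.0
    for entry in scorers:
        total += entry["sign"] * entry["weight"] * entry["model"].score(text)
    return total


if __name__ == "__main__":
    scorers = [
        {"name": "length", "model": LengthScorer(), "sign": 1, "weight": 1.0},
    ]
    print(aggregate(scorers, "A short candidate sentence."))

With the real scorers from the record (FluencyScorer, SimplicityTextScore, and so on) the same loop shape would apply, but the actual weighting, normalization, and guardrail handling are defined by ScorerWrapper, which is not shown in this dump.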
ForceFledgling/proxyhub
proxyhub/api.py
[ { "identifier": "Checker", "path": "proxyhub/checker.py", "snippet": "class Checker:\n \"\"\"Proxy checker.\"\"\"\n\n def __init__(\n self,\n judges,\n max_tries=3,\n timeout=8,\n verify_ssl=False,\n strict=False,\n dnsbl=None,\n real_ext_ip=None,\n types=None,\n post=False,\n loop=None,\n ):\n Judge.clear()\n self._judges = get_judges(judges, timeout, verify_ssl)\n self._method = 'POST' if post else 'GET'\n self._max_tries = max_tries\n self._real_ext_ip = real_ext_ip\n self._strict = strict\n self._dnsbl = dnsbl or []\n self._types = types or {}\n self._loop = loop or asyncio.get_event_loop()\n self._resolver = Resolver(loop=self._loop)\n\n self._req_http_proto = not types or bool(\n ('HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5') & types.keys()\n )\n self._req_https_proto = not types or bool(('HTTPS',) & types.keys())\n self._req_smtp_proto = not types or bool(('CONNECT:25',) & types.keys()) # noqa\n\n self._ngtrs = {proto for proto in types or NGTRS}\n\n async def check_judges(self):\n # TODO: need refactoring\n log.debug('Start check judges')\n stime = time.time()\n await asyncio.gather(\n *[j.check(real_ext_ip=self._real_ext_ip) for j in self._judges]\n )\n\n self._judges = [j for j in self._judges if j.is_working]\n log.debug(\n '%d judges added. Runtime: %.4f;' % (len(self._judges), time.time() - stime)\n )\n\n nojudges = []\n disable_protocols = []\n\n if len(Judge.available['HTTP']) == 0:\n nojudges.append('HTTP')\n disable_protocols.extend(['HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5'])\n self._req_http_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTP'].set()\n if len(Judge.available['HTTPS']) == 0:\n nojudges.append('HTTPS')\n disable_protocols.append('HTTPS')\n self._req_https_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTPS'].set()\n if len(Judge.available['SMTP']) == 0:\n # nojudges.append('SMTP')\n disable_protocols.append('SMTP')\n self._req_smtp_proto = False\n # for coroutines, which is already waiting\n Judge.ev['SMTP'].set()\n\n for proto in disable_protocols:\n if proto in self._ngtrs:\n self._ngtrs.remove(proto)\n\n if nojudges:\n warnings.warn(\n 'Not found judges for the {nojudges} protocol.\\n'\n 'Checking proxy on protocols {disp} is disabled.'.format(\n nojudges=nojudges, disp=disable_protocols\n ),\n UserWarning,\n )\n if self._judges:\n log.debug('Loaded: %d proxy judges' % len(set(self._judges)))\n else:\n RuntimeError('Not found judges')\n\n def _types_passed(self, proxy):\n if not self._types:\n return True\n for proto, lvl in proxy.types.copy().items():\n req_levels = self._types.get(proto)\n if not req_levels or (lvl in req_levels):\n if not self._strict:\n return True\n else:\n if self._strict:\n del proxy.types[proto]\n if self._strict and proxy.types:\n return True\n proxy.log('Protocol or the level of anonymity differs from the requested')\n return False\n\n async def _in_DNSBL(self, host):\n _host = '.'.join(reversed(host.split('.'))) # reverse address\n tasks = []\n for domain in self._dnsbl:\n query = '.'.join([_host, domain])\n tasks.append(self._resolver.resolve(query, logging=False))\n responses = await asyncio.gather(*tasks, return_exceptions=True)\n if any([r for r in responses if not isinstance(r, ResolveError)]):\n return True\n return False\n\n async def check(self, proxy):\n if self._dnsbl:\n if await self._in_DNSBL(proxy.host):\n proxy.log('Found in DNSBL')\n return False\n\n if self._req_http_proto:\n await Judge.ev['HTTP'].wait()\n if self._req_https_proto:\n await 
Judge.ev['HTTPS'].wait()\n if self._req_smtp_proto:\n await Judge.ev['SMTP'].wait()\n\n if proxy.expected_types:\n ngtrs = proxy.expected_types & self._ngtrs\n else:\n ngtrs = self._ngtrs\n\n results = []\n for proto in ngtrs:\n if proto == 'CONNECT:25':\n result = await self._check_conn_25(proxy, proto)\n else:\n result = await self._check(proxy, proto)\n results.append(result)\n\n proxy.is_working = True if any(results) else False\n\n if proxy.is_working and self._types_passed(proxy):\n return True\n return False\n\n async def _check_conn_25(self, proxy, proto):\n judge = Judge.get_random(proto)\n proxy.log('Selected judge: %s' % judge)\n result = False\n for attempt in range(self._max_tries):\n try:\n proxy.ngtr = proto\n await proxy.connect()\n await proxy.ngtr.negotiate(host=judge.host, ip=judge.ip)\n except ProxyTimeoutError:\n continue\n except (\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ):\n break\n else:\n proxy.types[proxy.ngtr.name] = None\n result = True\n break\n finally:\n proxy.close()\n return result\n\n async def _check(self, proxy, proto):\n judge = Judge.get_random(proto)\n proxy.log('Selected judge: %s' % judge)\n result = False\n for attempt in range(self._max_tries):\n try:\n proxy.ngtr = proto\n await proxy.connect()\n await proxy.ngtr.negotiate(host=judge.host, ip=judge.ip)\n headers, content, rv = await _send_test_request(\n self._method, proxy, judge\n )\n except ProxyTimeoutError:\n continue\n except (\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ):\n break\n else:\n content = _decompress_content(headers, content)\n result = _check_test_response(proxy, headers, content, rv)\n if result:\n if proxy.ngtr.check_anon_lvl:\n lvl = _get_anonymity_lvl(\n self._real_ext_ip, proxy, judge, content\n )\n else:\n lvl = None\n proxy.types[proxy.ngtr.name] = lvl\n break\n finally:\n proxy.close()\n return result" }, { "identifier": "ResolveError", "path": "proxyhub/errors.py", "snippet": "class ResolveError(Exception):\n pass" }, { "identifier": "PROVIDERS", "path": "proxyhub/providers.py", "snippet": "PROVIDERS = [\n Provider(\n url='http://www.proxylists.net/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 49\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=http',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # added by ZerGo0\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=socks4',\n proto=('SOCKS4'),\n ), # added by ZerGo0\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=socks5',\n proto=('SOCKS5'),\n ), # added by ZerGo0\n Provider(\n url='http://ipaddress.com/proxy-list/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 53\n Provider(\n url='https://www.sslproxies.org/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 100\n Provider(\n url='https://freshfreeproxylist.wordpress.com/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 50\n Provider(\n url='http://proxytime.ru/http',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 1400\n Provider(\n url='https://free-proxy-list.net/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 300\n Provider(\n url='https://us-proxy.org/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 200\n Provider(\n url='http://fineproxy.org/eng/fresh-proxies/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 
'CONNECT:25'),\n ), # 5500\n Provider(url='https://socks-proxy.net/', proto=('SOCKS4', 'SOCKS5')), # 80\n Provider(\n url='http://www.httptunnel.ge/ProxyListForFree.aspx',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 200\n Provider(\n url='http://cn-proxy.com/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 70\n Provider(\n url='https://hugeproxies.com/home/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 800\n Provider(\n url='http://proxy.rufey.ru/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 153\n Provider(\n url='https://geekelectronics.org/my-servisy/proxy',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 400\n Provider(\n url='http://pubproxy.com/api/proxy?limit=20&format=txt',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 20\n Proxy_list_org(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 140\n Xseo_in(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 240\n Spys_ru(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 660\n Proxylistplus_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 450\n Proxylist_me(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 2872\n Foxtools_ru(\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'), max_conn=1\n ), # noqa; 500\n Gatherproxy_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 3212\n Nntime_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 1050\n Blogspot_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 24800\n Gatherproxy_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 30\n Blogspot_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 1486\n Tools_rosinstrument_com(\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')\n ), # noqa; 4000\n Tools_rosinstrument_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 1800\n My_proxy_com(max_conn=2), # noqa; 1000\n Checkerproxy_net(), # noqa; 60000\n Aliveproxy_com(), # noqa; 210\n Freeproxylists_com(), # noqa; 1338\n Webanetlabs_net(), # noqa; 5000\n Maxiproxies_com(), # noqa; 430\n Proxylist_download(), # noqa; 35590\n # # Bad...\n # http://www.proxylist.ro/\n # Provider(url='http://proxydb.net/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS',\n # 'CONNECT:25', 'SOCKS4', 'SOCKS5')),\n # Provider(url='http://www.cybersyndrome.net/pla6.html',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 1100\n # Provider(url='https://www.ip-adress.com/proxy-list',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 57\n # Provider(url='https://www.marcosbl.com/lab/proxies/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 89\n # Provider(url='http://go4free.xyz/Free-Proxy/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 196\n # Provider(url='http://blackstarsecurity.com/proxy-list.txt'), # 7014\n # Provider(url='http://www.get-proxy.net/proxy-archives'), # 519\n # Proxyb_net(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 857\n # Proxz_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n # max_conn=2), # 443\n # Proxynova_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 818\n # _50kproxies_com(), # 822\n # Free_proxy_cz(), # 420\n]" }, { "identifier": "Provider", "path": "proxyhub/providers.py", "snippet": "class Provider:\n \"\"\"Proxy provider.\n\n Provider - a website that publish free public proxy lists.\n\n :param str url: Url of page where to find proxies\n :param tuple proto:\n (optional) List of the types (protocols) that may be supported\n by 
proxies returned by the provider. Then used as :attr:`Proxy.types`\n :param int max_conn:\n (optional) The maximum number of concurrent connections on the provider\n :param int max_tries:\n (optional) The maximum number of attempts to receive response\n :param int timeout:\n (optional) Timeout of a request in seconds\n \"\"\"\n\n _pattern = IPPortPatternGlobal\n\n def __init__(\n self, url=None, proto=(), max_conn=4, max_tries=3, timeout=20, loop=None\n ):\n if url:\n self.domain = urlparse(url).netloc\n self.url = url\n self.proto = proto\n self._max_tries = max_tries\n self._timeout = timeout\n self._session = None\n self._cookies = {}\n self._proxies = set()\n # concurrent connections on the current provider\n self._sem_provider = asyncio.Semaphore(max_conn)\n self._loop = loop or asyncio.get_event_loop()\n\n @property\n def proxies(self):\n \"\"\"Return all found proxies.\n\n :return:\n Set of tuples with proxy hosts, ports and types (protocols)\n that may be supported (from :attr:`.proto`).\n\n For example:\n {('192.168.0.1', '80', ('HTTP', 'HTTPS'), ...)}\n\n :rtype: set\n \"\"\"\n return self._proxies\n\n @proxies.setter\n def proxies(self, new):\n new = [(host, port, self.proto) for host, port in new if port]\n self._proxies.update(new)\n\n async def get_proxies(self):\n \"\"\"Receive proxies from the provider and return them.\n\n :return: :attr:`.proxies`\n \"\"\"\n log.debug('Try to get proxies from %s' % self.domain)\n\n async with aiohttp.ClientSession(\n headers=get_headers(), cookies=self._cookies, loop=self._loop\n ) as self._session:\n await self._pipe()\n\n log.debug(\n '%d proxies received from %s: %s'\n % (len(self.proxies), self.domain, self.proxies)\n )\n return self.proxies\n\n async def _pipe(self):\n await self._find_on_page(self.url)\n\n async def _find_on_pages(self, urls):\n if not urls:\n return\n tasks = []\n if not isinstance(urls[0], dict):\n urls = set(urls)\n for url in urls:\n if isinstance(url, dict):\n tasks.append(self._find_on_page(**url))\n else:\n tasks.append(self._find_on_page(url))\n await asyncio.gather(*tasks)\n\n async def _find_on_page(self, url, data=None, headers=None, method='GET'):\n page = await self.get(url, data=data, headers=headers, method=method)\n oldcount = len(self.proxies)\n try:\n received = self.find_proxies(page)\n except Exception as e:\n received = []\n log.error(\n 'Error when executing find_proxies.'\n 'Domain: %s; Error: %r' % (self.domain, e)\n )\n self.proxies = received\n added = len(self.proxies) - oldcount\n log.debug(\n '%d(%d) proxies added(received) from %s' % (added, len(received), url)\n )\n\n async def get(self, url, data=None, headers=None, method='GET'):\n for _ in range(self._max_tries):\n page = await self._get(url, data=data, headers=headers, method=method)\n if page:\n break\n return page\n\n async def _get(self, url, data=None, headers=None, method='GET'):\n page = ''\n try:\n timeout = aiohttp.ClientTimeout(total=self._timeout)\n async with self._sem_provider, self._session.request(\n method, url, data=data, headers=headers, timeout=timeout\n ) as resp:\n page = await resp.text()\n if resp.status != 200:\n log.debug(\n 'url: %s\\nheaders: %s\\ncookies: %s\\npage:\\n%s'\n % (url, resp.headers, resp.cookies, page)\n )\n raise BadStatusError('Status: %s' % resp.status)\n except (\n UnicodeDecodeError,\n BadStatusError,\n asyncio.TimeoutError,\n aiohttp.ClientOSError,\n aiohttp.ClientResponseError,\n aiohttp.ServerDisconnectedError,\n ) as e:\n page = ''\n log.debug('%s is failed. 
Error: %r;' % (url, e))\n return page\n\n def find_proxies(self, page):\n return self._find_proxies(page)\n\n def _find_proxies(self, page):\n proxies = self._pattern.findall(page)\n return proxies" }, { "identifier": "Proxy", "path": "proxyhub/proxy.py", "snippet": "class Proxy:\n \"\"\"Proxy.\n\n :param str host: IP address of the proxy\n :param int port: Port of the proxy\n :param tuple types:\n (optional) List of types (protocols) which may be supported\n by the proxy and which can be checked to work with the proxy\n :param int timeout:\n (optional) Timeout of a connection and receive a response in seconds\n :param bool verify_ssl:\n (optional) Flag indicating whether to check the SSL certificates.\n Set to True to check ssl certifications\n\n :raises ValueError: If the host not is IP address, or if the port > 65535\n \"\"\"\n\n @classmethod\n async def create(cls, host, *args, **kwargs):\n \"\"\"Asynchronously create a :class:`Proxy` object.\n\n :param str host: A passed host can be a domain or IP address.\n If the host is a domain, try to resolve it\n :param str *args:\n (optional) Positional arguments that :class:`Proxy` takes\n :param str **kwargs:\n (optional) Keyword arguments that :class:`Proxy` takes\n\n :return: :class:`Proxy` object\n :rtype: proxyhub.Proxy\n\n :raises ResolveError: If could not resolve the host\n :raises ValueError: If the port > 65535\n \"\"\" # noqa: W605\n loop = kwargs.pop('loop', None)\n resolver = kwargs.pop('resolver', Resolver(loop=loop))\n try:\n _host = await resolver.resolve(host)\n self = cls(_host, *args, **kwargs)\n except (ResolveError, ValueError) as e:\n log.error('%s:%s: Error at creating: %s' % (host, args[0], e))\n raise\n return self\n\n def __init__(self, host=None, port=None, types=(), timeout=8, verify_ssl=False):\n self.host = host\n if not Resolver.host_is_ip(self.host):\n raise ValueError(\n 'The host of proxy should be the IP address. '\n 'Try Proxy.create() if the host is a domain'\n )\n\n self.port = int(port)\n if self.port > 65535:\n raise ValueError('The port of proxy cannot be greater than 65535')\n\n self.expected_types = set(types) & {\n 'HTTP',\n 'HTTPS',\n 'CONNECT:80',\n 'CONNECT:25',\n 'SOCKS4',\n 'SOCKS5',\n }\n self._timeout = timeout\n self._ssl_context = True if verify_ssl else _ssl._create_unverified_context()\n self._types = {}\n self._is_working = False\n self.stat = {'requests': 0, 'errors': Counter()}\n self._ngtr = None\n self._geo = Resolver.get_ip_info(self.host)\n self._log = []\n self._runtimes = []\n self._schemes = ()\n self._closed = True\n self._reader = {'conn': None, 'ssl': None}\n self._writer = {'conn': None, 'ssl': None}\n\n def __repr__(self):\n \"\"\"Class representation\n e.g. 
<Proxy US 1.12 [HTTP: Anonymous, HTTPS] 10.0.0.1:8080>\n \"\"\"\n tpinfo = []\n order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731\n for tp, lvl in sorted(self.types.items(), key=order):\n s = '{tp}: {lvl}' if lvl else '{tp}'\n s = s.format(tp=tp, lvl=lvl)\n tpinfo.append(s)\n tpinfo = ', '.join(tpinfo)\n return '<Proxy {code} {avg:.2f}s [{types}] {host}:{port}>'.format(\n code=self._geo.code,\n types=tpinfo,\n host=self.host,\n port=self.port,\n avg=self.avg_resp_time,\n )\n\n @property\n def types(self):\n \"\"\"Types (protocols) supported by the proxy.\n\n | Where key is type, value is level of anonymity\n (only for HTTP, for other types level always is None).\n | Available types: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25\n | Available levels: Transparent, Anonymous, High.\n\n :rtype: dict\n \"\"\"\n return self._types\n\n @property\n def is_working(self):\n \"\"\"True if the proxy is working, False otherwise.\n\n :rtype: bool\n \"\"\"\n return self._is_working\n\n @is_working.setter\n def is_working(self, val):\n self._is_working = val\n\n @property\n def writer(self):\n return self._writer.get('ssl') or self._writer.get('conn')\n\n @property\n def reader(self):\n return self._reader.get('ssl') or self._reader.get('conn')\n\n @property\n def priority(self):\n return (self.error_rate, self.avg_resp_time)\n\n @property\n def error_rate(self):\n \"\"\"Error rate: from 0 to 1.\n\n For example: 0.7 = 70% requests ends with error.\n\n :rtype: float\n\n .. versionadded:: 0.2.0\n \"\"\"\n if not self.stat['requests']:\n return 0\n return round(sum(self.stat['errors'].values()) / self.stat['requests'], 2)\n\n @property\n def schemes(self):\n \"\"\"Return supported schemes.\"\"\"\n if not self._schemes:\n _schemes = []\n if self.types.keys() & _HTTP_PROTOS:\n _schemes.append('HTTP')\n if self.types.keys() & _HTTPS_PROTOS:\n _schemes.append('HTTPS')\n self._schemes = tuple(_schemes)\n return self._schemes\n\n @property\n def avg_resp_time(self):\n \"\"\"The average connection/response time.\n\n :rtype: float\n \"\"\"\n if not self._runtimes:\n return 0\n return round(sum(self._runtimes) / len(self._runtimes), 2)\n\n @property\n def avgRespTime(self):\n \"\"\"\n .. deprecated:: 2.0\n Use :attr:`avg_resp_time` instead.\n \"\"\"\n warnings.warn(\n '`avgRespTime` property is deprecated, ' 'use `avg_resp_time` instead.',\n DeprecationWarning,\n )\n return self.avg_resp_time\n\n @property\n def geo(self):\n \"\"\"Geo information about IP address of the proxy.\n\n :return:\n Named tuple with fields:\n * ``code`` - ISO country code\n * ``name`` - Full name of country\n * ``region_code`` - ISO region code\n * ``region_name`` - Full name of region\n * ``city_name`` - Full name of city\n :rtype: collections.namedtuple\n\n .. 
versionchanged:: 0.2.0\n In previous versions return a dictionary, now named tuple.\n \"\"\"\n return self._geo\n\n @property\n def ngtr(self):\n return self._ngtr\n\n @ngtr.setter\n def ngtr(self, proto):\n self._ngtr = NGTRS[proto](self)\n\n def as_json(self):\n \"\"\"Return the proxy's properties in JSON format.\n\n :rtype: dict\n \"\"\"\n info = {\n 'host': self.host,\n 'port': self.port,\n 'geo': {\n 'country': {'code': self._geo.code, 'name': self._geo.name},\n 'region': {\n 'code': self._geo.region_code,\n 'name': self._geo.region_name,\n },\n 'city': self._geo.city_name,\n },\n 'types': [],\n 'avg_resp_time': self.avg_resp_time,\n 'error_rate': self.error_rate,\n }\n\n order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731\n for tp, lvl in sorted(self.types.items(), key=order):\n info['types'].append({'type': tp, 'level': lvl or ''})\n return info\n\n def as_text(self):\n \"\"\"\n Return proxy as host:port\n\n :rtype: str\n \"\"\"\n return \"{}:{}\\n\".format(self.host, self.port)\n\n def log(self, msg, stime=0, err=None):\n ngtr = self.ngtr.name if self.ngtr else 'INFO'\n runtime = time.time() - stime if stime else 0\n log.debug(\n '{h}:{p} [{n}]: {msg}; Runtime: {rt:.2f}'.format(\n h=self.host, p=self.port, n=ngtr, msg=msg, rt=runtime\n )\n )\n trunc = '...' if len(msg) > 58 else ''\n msg = '{msg:.60s}{trunc}'.format(msg=msg, trunc=trunc)\n self._log.append((ngtr, msg, runtime))\n if err:\n self.stat['errors'][err.errmsg] += 1\n if runtime and 'timeout' not in msg:\n self._runtimes.append(runtime)\n\n def get_log(self):\n \"\"\"Proxy log.\n\n :return: The proxy log in format: (negotaitor, msg, runtime)\n :rtype: tuple\n\n .. versionadded:: 0.2.0\n \"\"\"\n return self._log\n\n async def connect(self, ssl=False):\n err = None\n msg = '%s' % 'SSL: ' if ssl else ''\n stime = time.time()\n self.log('%sInitial connection' % msg)\n try:\n if ssl:\n _type = 'ssl'\n sock = self._writer['conn'].get_extra_info('socket')\n params = {\n 'ssl': self._ssl_context,\n 'sock': sock,\n 'server_hostname': self.host,\n }\n else:\n _type = 'conn'\n params = {'host': self.host, 'port': self.port}\n self._reader[_type], self._writer[_type] = await asyncio.wait_for(\n asyncio.open_connection(**params), timeout=self._timeout\n )\n except asyncio.TimeoutError:\n msg += 'Connection: timeout'\n err = ProxyTimeoutError(msg)\n raise err\n except (ConnectionRefusedError, OSError, _ssl.SSLError):\n msg += 'Connection: failed'\n err = ProxyConnError(msg)\n raise err\n # except asyncio.CancelledError:\n # log.debug('Cancelled in proxy.connect()')\n # raise ProxyConnError()\n else:\n msg += 'Connection: success'\n self._closed = False\n finally:\n self.stat['requests'] += 1\n self.log(msg, stime, err=err)\n\n def close(self):\n if self._closed:\n return\n self._closed = True\n if self.writer:\n # try:\n self.writer.close()\n # except RuntimeError:\n # print('Try proxy.close() when loop is closed:',\n # asyncio.get_event_loop()._closed)\n self._reader = {'conn': None, 'ssl': None}\n self._writer = {'conn': None, 'ssl': None}\n self.log('Connection: closed')\n self._ngtr = None\n\n async def send(self, req):\n msg, err = '', None\n _req = req.encode() if not isinstance(req, bytes) else req\n try:\n self.writer.write(_req)\n await self.writer.drain()\n except ConnectionResetError:\n msg = '; Sending: failed'\n err = ProxySendError(msg)\n raise err\n finally:\n self.log('Request: %s%s' % (req, msg), err=err)\n\n async def recv(self, length=0, head_only=False):\n resp, msg, err = b'', '', None\n stime = 
time.time()\n try:\n resp = await asyncio.wait_for(\n self._recv(length, head_only), timeout=self._timeout\n )\n except asyncio.TimeoutError:\n msg = 'Received: timeout'\n err = ProxyTimeoutError(msg)\n raise err\n except (ConnectionResetError, OSError):\n msg = 'Received: failed' # (connection is reset by the peer)\n err = ProxyRecvError(msg)\n raise err\n else:\n msg = 'Received: %s bytes' % len(resp)\n if not resp:\n err = ProxyEmptyRecvError(msg)\n raise err\n finally:\n if resp:\n msg += ': %s' % resp[:12]\n self.log(msg, stime, err=err)\n return resp\n\n async def _recv(self, length=0, head_only=False):\n resp = b''\n if length:\n try:\n resp = await self.reader.readexactly(length)\n except asyncio.IncompleteReadError as e:\n resp = e.partial\n else:\n body_size, body_recv, chunked = 0, 0, None\n while not self.reader.at_eof():\n line = await self.reader.readline()\n resp += line\n if body_size:\n body_recv += len(line)\n if body_recv >= body_size:\n break\n elif chunked and line == b'0\\r\\n':\n break\n elif not body_size and line == b'\\r\\n':\n if head_only:\n break\n headers = parse_headers(resp)\n body_size = int(headers.get('Content-Length', 0))\n if not body_size:\n chunked = headers.get('Transfer-Encoding') == 'chunked'\n return resp" }, { "identifier": "Resolver", "path": "proxyhub/resolver.py", "snippet": "class Resolver:\n \"\"\"Async host resolver based on aiodns.\"\"\"\n\n _cached_hosts = {}\n _ip_hosts = [\n 'https://wtfismyip.com/text',\n 'http://api.ipify.org/',\n 'http://ipinfo.io/ip',\n 'http://ipv4.icanhazip.com/',\n 'http://myexternalip.com/raw',\n 'http://ipinfo.io/ip',\n 'http://ifconfig.io/ip',\n ]\n # the list of resolvers will point a copy of original one\n _temp_host = []\n\n def __init__(self, timeout=5, loop=None):\n self._timeout = timeout\n self._loop = loop or asyncio.get_event_loop()\n self._resolver = aiodns.DNSResolver(loop=self._loop)\n\n @staticmethod\n def host_is_ip(host):\n \"\"\"Check a host is IP address.\"\"\"\n # TODO: add IPv6 support\n try:\n host = '.'.join(f'{int(n)}' for n in host.split('.'))\n ipaddress.IPv4Address(host)\n except (ipaddress.AddressValueError, ValueError):\n return False\n else:\n return True\n\n @staticmethod\n def get_ip_info(ip):\n \"\"\"Return geo information about IP address.\n\n `code` - ISO country code\n `name` - Full name of country\n `region_code` - ISO region code\n `region_name` - Full name of region\n `city_name` - Full name of city\n \"\"\"\n # from pprint import pprint\n try:\n ipInfo = _mmdb_reader.get(ip) or {}\n except (maxminddb.errors.InvalidDatabaseError, ValueError):\n ipInfo = {}\n\n code, name = '--', 'Unknown'\n city_name, region_code, region_name = ('Unknown',) * 3\n if 'country' in ipInfo:\n code = ipInfo['country']['iso_code']\n name = ipInfo['country']['names']['en']\n elif 'continent' in ipInfo:\n code = ipInfo['continent']['code']\n name = ipInfo['continent']['names']['en']\n if 'city' in ipInfo:\n city_name = ipInfo['city']['names']['en']\n if 'subdivisions' in ipInfo:\n region_code = ipInfo['subdivisions'][0]['iso_code']\n region_name = ipInfo['subdivisions'][0]['names']['en']\n return GeoData(code, name, region_code, region_name, city_name)\n\n def _pop_random_ip_host(self):\n host = random.choice(self._temp_host)\n self._temp_host.remove(host)\n return host\n\n async def get_real_ext_ip(self):\n \"\"\"Return real external IP address.\"\"\"\n # make a copy of original one to temp one\n # so original one will stay no change\n self._temp_host = self._ip_hosts.copy()\n while 
self._temp_host:\n try:\n timeout = aiohttp.ClientTimeout(total=self._timeout)\n async with aiohttp.ClientSession(\n timeout=timeout, loop=self._loop\n ) as session, session.get(self._pop_random_ip_host()) as resp:\n ip = await resp.text()\n except asyncio.TimeoutError:\n pass\n else:\n ip = ip.strip()\n if self.host_is_ip(ip):\n log.debug('Real external IP: %s', ip)\n break\n else:\n raise RuntimeError('Could not get the external IP')\n return ip\n\n async def resolve(self, host, port=80, family=None, qtype='A', logging=True):\n \"\"\"Return resolving IP address(es) from host name.\"\"\"\n if self.host_is_ip(host):\n return host\n\n _host = self._cached_hosts.get(host)\n if _host:\n return _host\n\n resp = await self._resolve(host, qtype)\n\n if resp:\n hosts = [\n {\n 'hostname': host,\n 'host': r.host,\n 'port': port,\n 'family': family,\n 'proto': socket.IPPROTO_IP,\n 'flags': socket.AI_NUMERICHOST,\n }\n for r in resp\n ]\n if family:\n self._cached_hosts[host] = hosts\n else:\n self._cached_hosts[host] = hosts[0]['host']\n if logging:\n log.debug('%s: Host resolved: %s' % (host, self._cached_hosts[host]))\n else:\n if logging:\n log.warning('%s: Could not resolve host' % host)\n return self._cached_hosts.get(host)\n\n async def _resolve(self, host, qtype):\n try:\n resp = await asyncio.wait_for(\n self._resolver.query(host, qtype), timeout=self._timeout\n )\n except (aiodns.error.DNSError, asyncio.TimeoutError):\n raise ResolveError\n else:\n return resp" }, { "identifier": "Server", "path": "proxyhub/server.py", "snippet": "class Server:\n \"\"\"Server distributes incoming requests to a pool of found proxies.\"\"\"\n\n def __init__(\n self,\n host,\n port,\n proxies,\n timeout=8,\n max_tries=3,\n min_queue=5,\n min_req_proxy=5,\n max_error_rate=0.5,\n max_resp_time=8,\n prefer_connect=False,\n http_allowed_codes=None,\n backlog=100,\n loop=None,\n **kwargs,\n ):\n self.host = host\n self.port = int(port)\n self._loop = loop or asyncio.get_event_loop()\n self._timeout = timeout\n self._max_tries = max_tries\n self._backlog = backlog\n self._prefer_connect = prefer_connect\n\n self._server = None\n self._connections = {}\n self._proxy_pool = ProxyPool(\n proxies, min_req_proxy, max_error_rate, max_resp_time, min_queue\n )\n self._resolver = Resolver(loop=self._loop)\n self._http_allowed_codes = http_allowed_codes or []\n\n def start(self):\n\n srv = asyncio.start_server(\n self._accept,\n host=self.host,\n port=self.port,\n backlog=self._backlog,\n loop=self._loop,\n )\n self._server = self._loop.run_until_complete(srv)\n\n log.info(\n 'Listening established on {0}'.format(self._server.sockets[0].getsockname())\n )\n\n def stop(self):\n if not self._server:\n return\n for conn in self._connections:\n if not conn.done():\n conn.cancel()\n self._server.close()\n if not self._loop.is_running():\n self._loop.run_until_complete(self._server.wait_closed())\n # Time to close the running futures in self._connections\n self._loop.run_until_complete(asyncio.sleep(0.5))\n self._server = None\n self._loop.stop()\n log.info('Server is stopped')\n\n def _accept(self, client_reader, client_writer):\n def _on_completion(f):\n reader, writer = self._connections.pop(f)\n writer.close()\n log.debug('client: %d; closed' % id(client_reader))\n try:\n exc = f.exception()\n except asyncio.CancelledError:\n log.debug('CancelledError in server._handle:_on_completion')\n exc = None\n if exc:\n if isinstance(exc, NoProxyError):\n self.stop()\n else:\n raise exc\n\n f = 
asyncio.ensure_future(self._handle(client_reader, client_writer))\n f.add_done_callback(_on_completion)\n self._connections[f] = (client_reader, client_writer)\n\n async def _handle(self, client_reader, client_writer):\n log.debug(\n 'Accepted connection from %s' % (client_writer.get_extra_info('peername'),)\n )\n\n request, headers = await self._parse_request(client_reader)\n scheme = self._identify_scheme(headers)\n client = id(client_reader)\n log.debug(\n 'client: %d; request: %s; headers: %s; scheme: %s'\n % (client, request, headers, scheme)\n )\n\n # API for controlling proxyhub2\n if headers['Host'] == 'proxycontrol':\n _api, _operation, _params = headers['Path'].split('/', 5)[3:]\n if _api == 'api':\n if _operation == 'remove':\n proxy_host, proxy_port = _params.split(':', 1)\n self._proxy_pool.remove(proxy_host, int(proxy_port))\n log.debug(\n 'Remove Proxy: client: %d; request: %s; headers: %s; scheme: %s; proxy_host: %s; proxy_port: %s'\n % (client, request, headers, scheme, proxy_host, proxy_port)\n )\n client_writer.write(b'HTTP/1.1 204 No Content\\r\\n\\r\\n')\n await client_writer.drain()\n return\n elif _operation == 'history':\n query_type, url = _params.split(':', 1)\n if query_type == 'url':\n previous_proxy = history.get(\n f\"{client_reader._transport.get_extra_info('peername')[0]}-{url}\"\n )\n if previous_proxy is None:\n client_writer.write(b'HTTP/1.1 204 No Content\\r\\n\\r\\n')\n await client_writer.drain()\n return\n else:\n previous_proxy_bytestring = (\n '{\"proxy\": \"%s\"}' % previous_proxy\n ).encode()\n client_writer.write(b'HTTP/1.1 200 OK\\r\\n')\n client_writer.write(b'Content-Type: application/json\\r\\n')\n client_writer.write(\n f\"Content-Length: {str(len(previous_proxy_bytestring) + 2).encode()}\\r\\n\"\n )\n client_writer.write(b'Access-Control-Allow-Origin: *\\r\\n')\n client_writer.write(\n b'Access-Control-Allow-Credentials: true\\r\\n\\r\\n'\n )\n\n client_writer.write(previous_proxy_bytestring + b'\\r\\n')\n await client_writer.drain()\n return\n\n for attempt in range(self._max_tries):\n stime, err = 0, None\n proxy = await self._proxy_pool.get(scheme)\n proto = self._choice_proto(proxy, scheme)\n log.debug(\n 'client: %d; attempt: %d; proxy: %s; proto: %s'\n % (client, attempt, proxy, proto)\n )\n\n try:\n await proxy.connect()\n\n if proto in ('CONNECT:80', 'SOCKS4', 'SOCKS5'):\n host = headers.get('Host')\n port = headers.get('Port', 80)\n try:\n ip = await self._resolver.resolve(host)\n except ResolveError:\n return\n proxy.ngtr = proto\n await proxy.ngtr.negotiate(host=host, port=port, ip=ip)\n if scheme == 'HTTPS' and proto in ('SOCKS4', 'SOCKS5'):\n client_writer.write(CONNECTED)\n await client_writer.drain()\n else: # HTTP\n await proxy.send(request)\n else: # proto: HTTP & HTTPS\n await proxy.send(request)\n\n history[\n f\"{client_reader._transport.get_extra_info('peername')[0]}-{headers['Path']}\"\n ] = (proxy.host + ':' + str(proxy.port))\n inject_resp_header = {\n 'headers': {'X-Proxy-Info': proxy.host + ':' + str(proxy.port)}\n }\n\n stime = time.time()\n stream = [\n asyncio.ensure_future(\n self._stream(reader=client_reader, writer=proxy.writer)\n ),\n asyncio.ensure_future(\n self._stream(\n reader=proxy.reader,\n writer=client_writer,\n scheme=scheme,\n inject=inject_resp_header,\n )\n ),\n ]\n await asyncio.gather(*stream, loop=self._loop)\n except asyncio.CancelledError:\n log.debug('Cancelled in server._handle')\n break\n except (\n ProxyTimeoutError,\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n 
ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ) as e:\n log.debug('client: %d; error: %r' % (client, e))\n continue\n except ErrorOnStream as e:\n log.debug(\n 'client: %d; error: %r; EOF: %s'\n % (client, e, client_reader.at_eof())\n )\n for task in stream:\n if not task.done():\n task.cancel()\n if client_reader.at_eof() and 'Timeout' in repr(e):\n # Proxy may not be able to receive EOF and weel be raised a\n # TimeoutError, but all the data has already successfully\n # returned, so do not consider this error of proxy\n break\n err = e\n if scheme == 'HTTPS': # SSL Handshake probably failed\n break\n else:\n break\n finally:\n proxy.log(request.decode(), stime, err=err)\n proxy.close()\n self._proxy_pool.put(proxy)\n\n async def _parse_request(self, reader, length=65536):\n request = await reader.read(length)\n headers = parse_headers(request)\n if headers['Method'] == 'POST' and request.endswith(b'\\r\\n\\r\\n'):\n # For aiohttp. POST data returns on second reading\n request += await reader.read(length)\n return request, headers\n\n def _identify_scheme(self, headers):\n if headers['Method'] == 'CONNECT':\n return 'HTTPS'\n else:\n return 'HTTP'\n\n def _choice_proto(self, proxy, scheme):\n if scheme == 'HTTP':\n if self._prefer_connect and ('CONNECT:80' in proxy.types):\n proto = 'CONNECT:80'\n else:\n relevant = {\n 'HTTP',\n 'CONNECT:80',\n 'SOCKS4',\n 'SOCKS5',\n } & proxy.types.keys()\n proto = relevant.pop()\n else: # HTTPS\n relevant = {'HTTPS', 'SOCKS4', 'SOCKS5'} & proxy.types.keys()\n proto = relevant.pop()\n return proto\n\n async def _stream(self, reader, writer, length=65536, scheme=None, inject=None):\n checked = False\n\n try:\n while not reader.at_eof():\n data = await asyncio.wait_for(reader.read(length), self._timeout)\n if not data:\n writer.close()\n break\n elif scheme and not checked:\n self._check_response(data, scheme)\n\n if inject.get('headers') is not None and len(inject['headers']) > 0:\n data = self._inject_headers(data, scheme, inject['headers'])\n\n checked = True\n\n writer.write(data)\n await writer.drain()\n\n except (\n asyncio.TimeoutError,\n ConnectionResetError,\n OSError,\n ProxyRecvError,\n BadStatusError,\n BadResponseError,\n ) as e:\n raise ErrorOnStream(e)\n\n def _check_response(self, data, scheme):\n if scheme == 'HTTP' and self._http_allowed_codes:\n line = data.split(b'\\r\\n', 1)[0].decode()\n try:\n header = parse_status_line(line)\n except BadStatusLine:\n raise BadResponseError\n if header['Status'] not in self._http_allowed_codes:\n raise BadStatusError(\n '%r not in %r' % (header['Status'], self._http_allowed_codes)\n )\n\n def _inject_headers(self, data, scheme, headers):\n custom_lines = []\n\n if scheme == 'HTTP' or scheme == 'HTTPS':\n status_line, rest_lines = data.split(b'\\r\\n', 1)\n custom_lines.append(status_line)\n\n for k, v in headers.items():\n custom_lines.append(('%s: %s' % (k, v)).encode())\n\n custom_lines.append(rest_lines)\n data = b'\\r\\n'.join(custom_lines)\n\n return data" }, { "identifier": "IPPortPatternLine", "path": "proxyhub/utils.py", "snippet": "BASE_DIR = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\nDATA_DIR = os.path.join(BASE_DIR, 'data')\ndef get_headers(rv=False):\ndef get_all_ip(page):\ndef get_status_code(resp, start=9, stop=12):\ndef parse_status_line(line):\ndef parse_headers(headers):\ndef update_geoip_db():" } ]
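The snippets above already expose enough of the provider/proxy API to assemble a minimal gathering pass by hand. The sketch below is illustrative only: it assumes the package is importable as `proxyhub` with the module layout implied by the `path` fields shown (`proxyhub/providers.py`, `proxyhub/proxy.py`, `proxyhub/errors.py`), and the provider URL is one of those listed in `PROVIDERS`.

```python
import asyncio

# Assumed import paths, taken from the "path" fields of the snippets above.
from proxyhub.errors import ResolveError
from proxyhub.providers import Provider
from proxyhub.proxy import Proxy


async def gather_from_one_provider(url):
    """Scrape a single free-proxy page and wrap each entry in a Proxy object."""
    provider = Provider(url=url, proto=('HTTP', 'HTTPS'))
    found = await provider.get_proxies()      # set of (host, port, types) tuples
    proxies = []
    for host, port, types in found:
        try:
            # Proxy.create() resolves domain hosts and validates the port.
            proxies.append(await Proxy.create(host, port, types=types))
        except (ResolveError, ValueError):
            continue
    return proxies


if __name__ == '__main__':
    # URL taken from the PROVIDERS list above.
    result = asyncio.run(gather_from_one_provider('https://free-proxy-list.net/'))
    print('%d proxies wrapped' % len(result))
```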
import asyncio import io import signal import warnings from collections import Counter, defaultdict from functools import partial from pprint import pprint from .checker import Checker from .errors import ResolveError from .providers import PROVIDERS, Provider from .proxy import Proxy from .resolver import Resolver from .server import Server from .utils import IPPortPatternLine, log
14,691
`Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required') self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes to it the incoming request. In addition to the parameters listed below are also accept all the parameters of the :meth:`.find` method and passed it to gather proxies to a pool. :ref:`Example of usage <proxyhub-examples-server>`. :param str host: (optional) Host of local proxy server :param int port: (optional) Port of local proxy server :param int limit: (optional) When will be found a requested number of working proxies, checking of new proxies will be lazily paused. Checking will be resumed if all the found proxies will be discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`). And will continue until it finds one working proxy and paused again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int strategy: (optional) The strategy used for picking proxy from pool. The default value is 'best' :param int min_queue: (optional) The minimum number of proxies to choose from before deciding which is the most suitable to use. The default value is 5 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. 
If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' )
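The `serve()` docstring that closes this excerpt is essentially a catalogue of pool-tuning keyword arguments. The fragment below only shows how those documented knobs combine; the top-level `Broker` import and the need to keep the event loop running afterwards are assumptions, since neither appears in the excerpt itself.

```python
import asyncio

from proxyhub import Broker  # assumed top-level export

broker = Broker(timeout=8, max_conn=200, max_tries=3)
broker.serve(
    host='127.0.0.1',
    port=8888,
    types=['HTTP', 'HTTPS'],         # forwarded to find(), which requires `types`
    limit=100,                       # lazily pause checking once 100 working proxies are pooled
    max_tries=3,                     # attempts per incoming client request
    min_req_proxy=5,                 # requests needed before judging a proxy's quality
    max_error_rate=0.5,              # drop proxies failing more than 50% of requests
    max_resp_time=8,                 # drop proxies averaging over 8 s per response
    prefer_connect=False,
    http_allowed_codes=[200, 301, 302],
)

# Assumption: the caller keeps the loop alive so the local server on
# http://127.0.0.1:8888 can accept connections.
asyncio.get_event_loop().run_forever()
```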
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] if stop_broker_on_sigint: try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxyhub-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs, ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxyhub-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. 
Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required') self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes to it the incoming request. In addition to the parameters listed below are also accept all the parameters of the :meth:`.find` method and passed it to gather proxies to a pool. :ref:`Example of usage <proxyhub-examples-server>`. :param str host: (optional) Host of local proxy server :param int port: (optional) Port of local proxy server :param int limit: (optional) When will be found a requested number of working proxies, checking of new proxies will be lazily paused. Checking will be resumed if all the found proxies will be discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`). And will continue until it finds one working proxy and paused again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int strategy: (optional) The strategy used for picking proxy from pool. The default value is 'best' :param int min_queue: (optional) The minimum number of proxies to choose from before deciding which is the most suitable to use. The default value is 5 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). 
The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' )
self._server = Server(
6
2023-11-05 13:28:57+00:00
24k
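Taken together, the code above and the recorded next line (`self._server = Server(`) cover the Broker's public surface: the constructor, `grab()`, `find()` and `serve()`. A minimal `find()` consumer is sketched below under two assumptions that are not visible in this record: that the package exports `Broker` at the top level, and that a `None` sentinel is queued when the requested limit is reached, as in the upstream proxybroker project this code closely mirrors.

```python
import asyncio

from proxyhub import Broker  # assumed top-level export


async def consume(proxies, limit):
    """Print proxies as the broker pushes them into the shared queue."""
    for _ in range(limit):
        proxy = await proxies.get()
        if proxy is None:  # assumed end-of-search sentinel, as in upstream proxybroker
            break
        print('Found:', proxy)


proxies = asyncio.Queue()
broker = Broker(proxies, timeout=8, max_tries=3)


async def main():
    await asyncio.gather(
        broker.find(types=['HTTP', 'HTTPS'], limit=10),  # `types` is required
        consume(proxies, limit=10),
    )


loop = asyncio.get_event_loop()
loop.run_until_complete(main())
broker.stop()  # same method the constructor registers as the SIGINT handler
```

Because `find()` in this version only schedules the grabbing and checking tasks before returning, it is the consumer coroutine that keeps the gather alive until enough proxies have arrived.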
TheFunny/ArisuAutoSweeper
module/webui/app.py
[ { "identifier": "AzurLaneConfig", "path": "module/config/config.py", "snippet": "class AzurLaneConfig(ConfigUpdater, ManualConfig, GeneratedConfig, ConfigWatcher):\n stop_event: threading.Event = None\n bound = {}\n\n # Class property\n is_hoarding_task = True\n\n def __setattr__(self, key, value):\n if key in self.bound:\n path = self.bound[key]\n self.modified[path] = value\n if self.auto_update:\n self.update()\n else:\n super().__setattr__(key, value)\n\n def __init__(self, config_name, task=None):\n logger.attr(\"Lang\", self.LANG)\n # This will read ./config/<config_name>.json\n self.config_name = config_name\n # Raw json data in yaml file.\n self.data = {}\n # Modified arguments. Key: Argument path in yaml file. Value: Modified value.\n # All variable modifications will be record here and saved in method `save()`.\n self.modified = {}\n # Key: Argument name in GeneratedConfig. Value: Path in `data`.\n self.bound = {}\n # If write after every variable modification.\n self.auto_update = True\n # Force override variables\n # Key: Argument name in GeneratedConfig. Value: Modified value.\n self.overridden = {}\n # Scheduler queue, will be updated in `get_next_task()`, list of Function objects\n # pending_task: Run time has been reached, but haven't been run due to task scheduling.\n # waiting_task: Run time haven't been reached, wait needed.\n self.pending_task = []\n self.waiting_task = []\n # Task to run and bind.\n # Task means the name of the function to run in AzurLaneAutoScript class.\n self.task: Function\n # Template config is used for dev tools\n self.is_template_config = config_name.startswith(\"template\")\n\n if self.is_template_config:\n # For dev tools\n logger.info(\"Using template config, which is read only\")\n self.auto_update = False\n self.task = name_to_function(\"template\")\n else:\n self.load()\n if task is None:\n # Bind `Alas` by default which includes emulator settings.\n task = name_to_function(\"Alas\")\n else:\n # Bind a specific task for debug purpose.\n task = name_to_function(task)\n self.bind(task)\n self.task = task\n self.save()\n\n def load(self):\n self.data = self.read_file(self.config_name)\n self.config_override()\n\n for path, value in self.modified.items():\n deep_set(self.data, keys=path, value=value)\n\n def bind(self, func, func_list=None):\n \"\"\"\n Args:\n func (str, Function): Function to run\n func_list (set): Set of tasks to be bound\n \"\"\"\n if func_list is None:\n func_list = [\"Alas\"]\n if isinstance(func, Function):\n func = func.command\n func_list.append(func)\n logger.info(f\"Bind task {func_list}\")\n\n # Bind arguments\n visited = set()\n self.bound.clear()\n for func in func_list:\n func_data = self.data.get(func, {})\n for group, group_data in func_data.items():\n for arg, value in group_data.items():\n path = f\"{group}.{arg}\"\n if path in visited:\n continue\n arg = path_to_arg(path)\n super().__setattr__(arg, value)\n self.bound[arg] = f\"{func}.{path}\"\n visited.add(path)\n\n # Override arguments\n for arg, value in self.overridden.items():\n super().__setattr__(arg, value)\n\n @property\n def hoarding(self):\n minutes = int(\n deep_get(\n self.data, keys=\"Alas.Optimization.TaskHoardingDuration\", default=0\n )\n )\n return timedelta(minutes=max(minutes, 0))\n\n @property\n def close_game(self):\n return deep_get(\n self.data, keys=\"Alas.Optimization.CloseGameDuringWait\", default=False\n )\n\n @cached_property\n def stored(self) -> StoredGenerated:\n stored = StoredGenerated()\n # Bind config\n for _, value in 
iter_attribute(stored):\n value._bind(self)\n del_cached_property(value, '_stored')\n return stored\n\n def get_next_task(self):\n \"\"\"\n Calculate tasks, set pending_task and waiting_task\n \"\"\"\n pending = []\n waiting = []\n error = []\n now = datetime.now()\n if AzurLaneConfig.is_hoarding_task:\n now -= self.hoarding\n for func in self.data.values():\n func = Function(func)\n if not func.enable:\n continue\n if not isinstance(func.next_run, datetime):\n error.append(func)\n elif func.next_run < now:\n pending.append(func)\n else:\n waiting.append(func)\n\n f = Filter(regex=r\"(.*)\", attr=[\"command\"])\n f.load(self.SCHEDULER_PRIORITY)\n if pending:\n pending = f.apply(pending)\n if waiting:\n waiting = f.apply(waiting)\n waiting = sorted(waiting, key=operator.attrgetter(\"next_run\"))\n if error:\n pending = error + pending\n\n self.pending_task = pending\n self.waiting_task = waiting\n\n def get_next(self):\n \"\"\"\n Returns:\n Function: Command to run\n \"\"\"\n self.get_next_task()\n\n if self.pending_task:\n AzurLaneConfig.is_hoarding_task = False\n logger.info(f\"Pending tasks: {[f.command for f in self.pending_task]}\")\n task = self.pending_task[0]\n logger.attr(\"Task\", task)\n return task\n else:\n AzurLaneConfig.is_hoarding_task = True\n\n if self.waiting_task:\n logger.info(\"No task pending\")\n task = copy.deepcopy(self.waiting_task[0])\n task.next_run = (task.next_run + self.hoarding).replace(microsecond=0)\n logger.attr(\"Task\", task)\n return task\n else:\n logger.critical(\"No task waiting or pending\")\n logger.critical(\"Please enable at least one task\")\n raise RequestHumanTakeover\n\n def save(self, mod_name='alas'):\n if not self.modified:\n return False\n\n for path, value in self.modified.items():\n deep_set(self.data, keys=path, value=value)\n\n logger.info(\n f\"Save config {filepath_config(self.config_name, mod_name)}, {dict_to_kv(self.modified)}\"\n )\n # Don't use self.modified = {}, that will create a new object.\n self.modified.clear()\n del_cached_property(self, 'stored')\n self.write_file(self.config_name, data=self.data)\n\n def update(self):\n self.load()\n self.config_override()\n self.bind(self.task)\n self.save()\n\n def config_override(self):\n now = datetime.now().replace(microsecond=0)\n limited = set()\n\n def limit_next_run(tasks, limit):\n for task in tasks:\n if task in limited:\n continue\n limited.add(task)\n next_run = deep_get(\n self.data, keys=f\"{task}.Scheduler.NextRun\", default=None\n )\n if isinstance(next_run, datetime) and next_run > limit:\n deep_set(self.data, keys=f\"{task}.Scheduler.NextRun\", value=now)\n\n limit_next_run(['BattlePass'], limit=now + timedelta(days=31, seconds=-1))\n limit_next_run(self.args.keys(), limit=now + timedelta(hours=24, seconds=-1))\n\n def override(self, **kwargs):\n \"\"\"\n Override anything you want.\n Variables stall remain overridden even config is reloaded from yaml file.\n Note that this method is irreversible.\n \"\"\"\n for arg, value in kwargs.items():\n self.overridden[arg] = value\n super().__setattr__(arg, value)\n\n def set_record(self, **kwargs):\n \"\"\"\n Args:\n **kwargs: For example, `Emotion1_Value=150`\n will set `Emotion1_Value=150` and `Emotion1_Record=now()`\n \"\"\"\n with self.multi_set():\n for arg, value in kwargs.items():\n record = arg.replace(\"Value\", \"Record\")\n self.__setattr__(arg, value)\n self.__setattr__(record, datetime.now().replace(microsecond=0))\n\n def multi_set(self):\n \"\"\"\n Set multiple arguments but save once.\n\n Examples:\n with 
self.config.multi_set():\n self.config.foo1 = 1\n self.config.foo2 = 2\n \"\"\"\n return MultiSetWrapper(main=self)\n\n def cross_get(self, keys, default=None):\n \"\"\"\n Get configs from other tasks.\n\n Args:\n keys (str, list[str]): Such as `{task}.Scheduler.Enable`\n default:\n\n Returns:\n Any:\n \"\"\"\n return deep_get(self.data, keys=keys, default=default)\n\n def cross_set(self, keys, value):\n \"\"\"\n Set configs to other tasks.\n\n Args:\n keys (str, list[str]): Such as `{task}.Scheduler.Enable`\n value (Any):\n\n Returns:\n Any:\n \"\"\"\n self.modified[keys] = value\n if self.auto_update:\n self.update()\n\n def task_delay(self, success=None, server_update=None, target=None, minute=None, task=None):\n \"\"\"\n Set Scheduler.NextRun\n Should set at least one arguments.\n If multiple arguments are set, use the nearest.\n\n Args:\n success (bool):\n If True, delay Scheduler.SuccessInterval\n If False, delay Scheduler.FailureInterval\n server_update (bool, list, str):\n If True, delay to nearest Scheduler.ServerUpdate\n If type is list or str, delay to such server update\n target (datetime.datetime, str, list):\n Delay to such time.\n minute (int, float, tuple):\n Delay several minutes.\n task (str):\n Set across task. None for current task.\n \"\"\"\n\n def ensure_delta(delay):\n return timedelta(seconds=int(ensure_time(delay, precision=3) * 60))\n\n run = []\n if success is not None:\n interval = (\n 120\n if success\n else 30\n )\n run.append(datetime.now() + ensure_delta(interval))\n if server_update is not None:\n if server_update is True:\n server_update = self.Scheduler_ServerUpdate\n run.append(get_server_next_update(server_update))\n if target is not None:\n target = [target] if not isinstance(target, list) else target\n target = nearest_future(target)\n run.append(target)\n if minute is not None:\n run.append(datetime.now() + ensure_delta(minute))\n\n if len(run):\n run = min(run).replace(microsecond=0)\n kv = dict_to_kv(\n {\n \"success\": success,\n \"server_update\": server_update,\n \"target\": target,\n \"minute\": minute,\n },\n allow_none=False,\n )\n if task is None:\n task = self.task.command\n logger.info(f\"Delay task `{task}` to {run} ({kv})\")\n self.modified[f'{task}.Scheduler.NextRun'] = run\n self.update()\n else:\n raise ScriptError(\n \"Missing argument in delay_next_run, should set at least one\"\n )\n\n def task_call(self, task, force_call=True):\n \"\"\"\n Call another task to run.\n\n That task will run when current task finished.\n But it might not be run because:\n - Other tasks should run first according to SCHEDULER_PRIORITY\n - Task is disabled by user\n\n Args:\n task (str): Task name to call, such as `Restart`\n force_call (bool):\n\n Returns:\n bool: If called.\n \"\"\"\n if deep_get(self.data, keys=f\"{task}.Scheduler.NextRun\", default=None) is None:\n raise ScriptError(f\"Task to call: `{task}` does not exist in user config\")\n\n if force_call or self.is_task_enabled(task):\n logger.info(f\"Task call: {task}\")\n self.modified[f\"{task}.Scheduler.NextRun\"] = datetime.now().replace(\n microsecond=0\n )\n self.modified[f\"{task}.Scheduler.Enable\"] = True\n if self.auto_update:\n self.update()\n return True\n else:\n logger.info(f\"Task call: {task} (skipped because disabled by user)\")\n return False\n\n @staticmethod\n def task_stop(message=\"\"):\n \"\"\"\n Stop current task.\n\n Raises:\n TaskEnd:\n \"\"\"\n if message:\n raise TaskEnd(message)\n else:\n raise TaskEnd\n\n def task_switched(self):\n \"\"\"\n Check if needs to switch 
task.\n\n Raises:\n bool: If task switched\n \"\"\"\n # Update event\n if self.stop_event is not None:\n if self.stop_event.is_set():\n return True\n prev = self.task\n self.load()\n new = self.get_next()\n if prev == new:\n logger.info(f\"Continue task `{new}`\")\n return False\n else:\n logger.info(f\"Switch task `{prev}` to `{new}`\")\n return True\n\n def check_task_switch(self, message=\"\"):\n \"\"\"\n Stop current task when task switched.\n\n Raises:\n TaskEnd:\n \"\"\"\n if self.task_switched():\n self.task_stop(message=message)\n\n def is_task_enabled(self, task):\n return bool(self.cross_get(keys=[task, 'Scheduler', 'Enable'], default=False))\n\n def update_daily_quests(self):\n \"\"\"\n Raises:\n TaskEnd: Call task `DailyQuest` and stop current task\n \"\"\"\n if self.stored.DailyActivity.is_expired():\n logger.info('DailyActivity expired, call task to update')\n self.task_call('DailyQuest')\n self.task_stop()\n if self.stored.DailyQuest.is_expired():\n logger.info('DailyQuest expired, call task to update')\n self.task_call('DailyQuest')\n self.task_stop()\n\n @property\n def DEVICE_SCREENSHOT_METHOD(self):\n return self.Emulator_ScreenshotMethod\n\n @property\n def DEVICE_CONTROL_METHOD(self):\n return self.Emulator_ControlMethod\n\n def temporary(self, **kwargs):\n \"\"\"\n Cover some settings, and recover later.\n\n Usage:\n backup = self.config.cover(ENABLE_DAILY_REWARD=False)\n # do_something()\n backup.recover()\n\n Args:\n **kwargs:\n\n Returns:\n ConfigBackup:\n \"\"\"\n backup = ConfigBackup(config=self)\n backup.cover(**kwargs)\n return backup" }, { "identifier": "Function", "path": "module/config/config.py", "snippet": "class Function:\n def __init__(self, data):\n self.enable = deep_get(data, keys=\"Scheduler.Enable\", default=False)\n self.command = deep_get(data, keys=\"Scheduler.Command\", default=\"Unknown\")\n self.next_run = deep_get(data, keys=\"Scheduler.NextRun\", default=DEFAULT_TIME)\n\n def __str__(self):\n enable = \"Enable\" if self.enable else \"Disable\"\n return f\"{self.command} ({enable}, {str(self.next_run)})\"\n\n __repr__ = __str__\n\n def __eq__(self, other):\n if not isinstance(other, Function):\n return False\n\n if self.command == other.command and self.next_run == other.next_run:\n return True\n else:\n return False" }, { "identifier": "alas_instance", "path": "module/config/utils.py", "snippet": "def alas_instance():\n \"\"\"\n Returns:\n list[str]: Name of all Alas instances, except `template`.\n \"\"\"\n out = []\n for file in os.listdir('./config'):\n name, extension = os.path.splitext(file)\n config_name, mod_name = os.path.splitext(name)\n mod_name = mod_name[1:]\n if name != 'template' and extension == '.json' and mod_name == '':\n out.append(name)\n\n # out.extend(mod_instance())\n\n if not len(out):\n out = ['aas']\n\n return out" }, { "identifier": "alas_template", "path": "module/config/utils.py", "snippet": "def alas_template():\n \"\"\"\n Returns:\n list[str]: Name of all Alas instances, except `template`.\n \"\"\"\n out = []\n for file in os.listdir('./config'):\n name, extension = os.path.splitext(file)\n if name == 'template' and extension == '.json':\n out.append(f'{name}-aas')\n\n # out.extend(mod_template())\n\n return out" }, { "identifier": "deep_get", "path": "module/config/utils.py", "snippet": "def deep_get(d, keys, default=None):\n \"\"\"\n Get values in dictionary safely.\n https://stackoverflow.com/questions/25833613/safe-method-to-get-value-of-nested-dictionary\n\n Args:\n d (dict):\n keys (str, list): Such as 
`Scheduler.NextRun.value`\n default: Default return if key not found.\n\n Returns:\n\n \"\"\"\n if isinstance(keys, str):\n keys = keys.split('.')\n assert type(keys) is list\n if d is None:\n return default\n if not keys:\n return d\n return deep_get(d.get(keys[0]), keys[1:], default)" }, { "identifier": "deep_iter", "path": "module/config/utils.py", "snippet": "def deep_iter(data, depth=0, current_depth=1):\n \"\"\"\n Iter a dictionary safely.\n\n Args:\n data (dict):\n depth (int): Maximum depth to iter\n current_depth (int):\n\n Returns:\n list: Key path\n Any:\n \"\"\"\n if isinstance(data, dict) \\\n and (depth and current_depth <= depth):\n for key, value in data.items():\n for child_path, child_value in deep_iter(value, depth=depth, current_depth=current_depth + 1):\n yield [key] + child_path, child_value\n else:\n yield [], data" }, { "identifier": "deep_set", "path": "module/config/utils.py", "snippet": "def deep_set(d, keys, value):\n \"\"\"\n Set value into dictionary safely, imitating deep_get().\n \"\"\"\n if isinstance(keys, str):\n keys = keys.split('.')\n assert type(keys) is list\n if not keys:\n return value\n if not isinstance(d, dict):\n d = {}\n d[keys[0]] = deep_set(d.get(keys[0], {}), keys[1:], value)\n return d" }, { "identifier": "dict_to_kv", "path": "module/config/utils.py", "snippet": "def dict_to_kv(dictionary, allow_none=True):\n \"\"\"\n Args:\n dictionary: Such as `{'path': 'Scheduler.ServerUpdate', 'value': True}`\n allow_none (bool):\n\n Returns:\n str: Such as `path='Scheduler.ServerUpdate', value=True`\n \"\"\"\n return ', '.join([f'{k}={repr(v)}' for k, v in dictionary.items() if allow_none or v is not None])" }, { "identifier": "filepath_args", "path": "module/config/utils.py", "snippet": "def filepath_args(filename='args', mod_name='alas'):\n return f'./module/config/argument/{filename}.json'" }, { "identifier": "filepath_config", "path": "module/config/utils.py", "snippet": "def filepath_config(filename, mod_name='alas'):\n if mod_name == 'alas':\n return os.path.join('./config', f'{filename}.json')\n else:\n return os.path.join('./config', f'{filename}.{mod_name}.json')" }, { "identifier": "read_file", "path": "module/config/utils.py", "snippet": "def read_file(file):\n \"\"\"\n Read a file, support both .yaml and .json format.\n Return empty dict if file not exists.\n\n Args:\n file (str):\n\n Returns:\n dict, list:\n \"\"\"\n folder = os.path.dirname(file)\n if not os.path.exists(folder):\n os.mkdir(folder)\n\n if not os.path.exists(file):\n return {}\n\n _, ext = os.path.splitext(file)\n lock = FileLock(f\"{file}.lock\")\n with lock:\n print(f'read: {file}')\n if ext == '.yaml':\n with open(file, mode='r', encoding='utf-8') as f:\n s = f.read()\n data = list(yaml.safe_load_all(s))\n if len(data) == 1:\n data = data[0]\n if not data:\n data = {}\n return data\n elif ext == '.json':\n with open(file, mode='r', encoding='utf-8') as f:\n s = f.read()\n return json.loads(s)\n else:\n print(f'Unsupported config file extension: {ext}')\n return {}" }, { "identifier": "logger", "path": "module/logger/logger.py", "snippet": "def empty_function(*args, **kwargs):\n def __init__(self, *args, func: Callable[[ConsoleRenderable], None] = None, **kwargs):\n def emit(self, record: logging.LogRecord) -> None:\n def handle(self, record: logging.LogRecord) -> bool:\n def options(self) -> ConsoleOptions:\ndef _set_file_logger(name=pyw_name):\ndef set_file_logger(name=pyw_name):\ndef set_func_logger(func):\ndef _get_renderables(\n self: Console, *objects, sep=\" \", 
end=\"\\n\", justify=None, emoji=None, markup=None, highlight=None,\n) -> List[ConsoleRenderable]:\ndef print(*objects: ConsoleRenderable, **kwargs):\ndef rule(title=\"\", *, characters=\"─\", style=\"rule.line\", end=\"\\n\", align=\"center\"):\ndef hr(title, level=3):\ndef attr(name, text):\ndef attr_align(name, text, front='', align=22):\ndef show():\ndef error_convert(func):\n def error_wrapper(msg, *args, **kwargs):\nclass RichFileHandler(RichHandler):\nclass RichRenderableHandler(RichHandler):\nclass HTMLConsole(Console):\nclass Highlighter(RegexHighlighter):\nWEB_THEME = Theme({\n \"web.brace\": Style(bold=True),\n \"web.bool_true\": Style(color=\"bright_green\", italic=True),\n \"web.bool_false\": Style(color=\"bright_red\", italic=True),\n \"web.none\": Style(color=\"magenta\", italic=True),\n \"web.path\": Style(color=\"magenta\"),\n \"web.filename\": Style(color=\"bright_magenta\"),\n \"web.str\": Style(color=\"green\", italic=False, bold=False),\n \"web.time\": Style(color=\"cyan\"),\n \"rule.text\": Style(bold=True),\n})" }, { "identifier": "Frame", "path": "module/webui/base.py", "snippet": "class Frame(Base):\n def __init__(self) -> None:\n super().__init__()\n self.page = \"Home\"\n\n def init_aside(self, expand_menu: bool = True, name: str = None) -> None:\n \"\"\"\n Call this in aside button callback function.\n Args:\n expand_menu: expand menu\n name: button name(label) to be highlight\n \"\"\"\n self.visible = True\n self.scope_clear()\n self.task_handler.remove_pending_task()\n clear(\"menu\")\n if expand_menu:\n self.expand_menu()\n if name:\n self.active_button(\"aside\", name)\n set_localstorage(\"aside\", name)\n\n def init_menu(self, collapse_menu: bool = True, name: str = None) -> None:\n \"\"\"\n Call this in menu button callback function.\n Args:\n collapse_menu: collapse menu\n name: button name(label) to be highlight\n \"\"\"\n self.visible = True\n self.page = name\n self.scope_clear()\n self.task_handler.remove_pending_task()\n clear(\"content\")\n if collapse_menu:\n self.collapse_menu()\n if name:\n self.active_button(\"menu\", name)\n\n @staticmethod\n @use_scope(\"ROOT\", clear=True)\n def _show() -> None:\n put_scope(\n \"header\",\n [\n put_html(Icon.ALAS).style(\"--header-icon--\"),\n put_text(\"AAS\").style(\"--header-text--\"),\n put_scope(\"header_status\"),\n put_scope(\"header_title\"),\n ],\n )\n put_scope(\n \"contents\",\n [\n put_scope(\"aside\"),\n put_scope(\"menu\"),\n put_scope(\"content\"),\n ],\n )\n\n @staticmethod\n @use_scope(\"header_title\", clear=True)\n def set_title(text=\"\"):\n put_text(text)\n\n @staticmethod\n def collapse_menu() -> None:\n run_js(\n f\"\"\"\n $(\"#pywebio-scope-menu\").addClass(\"container-menu-collapsed\");\n $(\".container-content-collapsed\").removeClass(\"container-content-collapsed\");\n \"\"\"\n )\n\n @staticmethod\n def expand_menu() -> None:\n run_js(\n f\"\"\"\n $(\".container-menu-collapsed\").removeClass(\"container-menu-collapsed\");\n $(\"#pywebio-scope-content\").addClass(\"container-content-collapsed\");\n \"\"\"\n )\n\n @staticmethod\n def active_button(position, value) -> None:\n run_js(\n f\"\"\"\n $(\"button.btn-{position}\").removeClass(\"btn-{position}-active\");\n $(\"div[style*='--{position}-{value}--']>button\").addClass(\"btn-{position}-active\");\n \"\"\"\n )\n\n @staticmethod\n def pin_set_invalid_mark(keys) -> None:\n if isinstance(keys, str):\n keys = [keys]\n keys = [\"_\".join(key.split(\".\")) for key in keys]\n js = \"\".join(\n [\n 
f\"\"\"$(\".form-control[name='{key}']\").addClass('is-invalid');\"\"\"\n for key in keys\n ]\n )\n if js:\n run_js(js)\n # for key in keys:\n # pin_update(key, valid_status=False)\n\n @staticmethod\n def pin_remove_invalid_mark(keys) -> None:\n if isinstance(keys, str):\n keys = [keys]\n keys = [\"_\".join(key.split(\".\")) for key in keys]\n js = \"\".join(\n [\n f\"\"\"$(\".form-control[name='{key}']\").removeClass('is-invalid');\"\"\"\n for key in keys\n ]\n )\n if js:\n run_js(js)\n # for key in keys:\n # pin_update(key, valid_status=0)" }, { "identifier": "get_config_mod", "path": "module/webui/fake.py", "snippet": "def get_config_mod(config_name):\n \"\"\"\n Args:\n config_name (str):\n \"\"\"\n return 'alas'" }, { "identifier": "load_config", "path": "module/webui/fake.py", "snippet": "def load_config(config_name):\n return AzurLaneConfig(config_name, '')" }, { "identifier": "asgi_app", "path": "module/webui/fastapi.py", "snippet": "def asgi_app(\n applications,\n cdn=True,\n static_dir=None,\n debug=False,\n allowed_origins=None,\n check_origin=None,\n **starlette_settings\n):\n debug = Session.debug = os.environ.get(\"PYWEBIO_DEBUG\", debug)\n cdn = cdn_validation(cdn, \"warn\")\n if cdn is False:\n cdn = \"pywebio_static\"\n routes = webio_routes(\n applications,\n cdn=cdn,\n allowed_origins=allowed_origins,\n check_origin=check_origin,\n )\n if static_dir:\n routes.append(\n Mount(\"/static\", app=StaticFiles(directory=static_dir), name=\"static\")\n )\n routes.append(\n Mount(\n \"/pywebio_static\",\n app=StaticFiles(directory=STATIC_PATH),\n name=\"pywebio_static\",\n )\n )\n middleware = [Middleware(HeaderMiddleware)]\n return Starlette(\n routes=routes, middleware=middleware, debug=debug, **starlette_settings\n )" }, { "identifier": "_t", "path": "module/webui/lang.py", "snippet": "def _t(s, lang=None):\n \"\"\"\n Get translation, ignore TRANSLATE_MODE\n \"\"\"\n if not lang:\n lang = LANG\n try:\n return dic_lang[lang][s]\n except KeyError:\n print(f\"Language key ({s}) not found\")\n return s" }, { "identifier": "t", "path": "module/webui/lang.py", "snippet": "def t(s, *args, **kwargs):\n \"\"\"\n Get translation.\n other args, kwargs pass to .format()\n \"\"\"\n if TRANSLATE_MODE:\n return s\n return _t(s, LANG).format(*args, **kwargs)" }, { "identifier": "put_input", "path": "module/webui/pin.py", "snippet": "def put_input(name, type='text', *, label='', value=None, placeholder=None, readonly=None, datalist=None,\n help_text=None, scope=None, position=OutputPosition.BOTTOM, **other_html_attrs) -> Output:\n \"\"\"Output an input widget. Refer to: `pywebio.input.input()`\"\"\"\n from pywebio.input import input\n check_dom_name_value(name, 'pin `name`')\n single_input_return = input(name=name, label=label, value=value, type=type, placeholder=placeholder,\n readonly=readonly, datalist=datalist, help_text=help_text, **other_html_attrs)\n return _pin_output(single_input_return, scope, position)" }, { "identifier": "put_select", "path": "module/webui/pin.py", "snippet": "def put_select(name, options=None, *, label='', multiple=None, value=None, help_text=None,\n scope=None, position=OutputPosition.BOTTOM, **other_html_attrs) -> Output:\n \"\"\"Output a select widget. 
Refer to: `pywebio.input.select()`\"\"\"\n from pywebio.input import select\n check_dom_name_value(name, 'pin `name`')\n single_input_return = select(name=name, options=options, label=label, multiple=multiple,\n value=value, help_text=help_text, **other_html_attrs)\n return _pin_output(single_input_return, scope, position)" }, { "identifier": "ProcessManager", "path": "module/webui/process_manager.py", "snippet": "class ProcessManager:\n _processes: Dict[str, \"ProcessManager\"] = {}\n\n def __init__(self, config_name: str = \"alas\") -> None:\n self.config_name = config_name\n self._renderable_queue: queue.Queue[ConsoleRenderable] = State.manager.Queue()\n self.renderables: List[ConsoleRenderable] = []\n self.renderables_max_length = 400\n self.renderables_reduce_length = 80\n self._process: Process = None\n self.thd_log_queue_handler: threading.Thread = None\n\n def start(self, func, ev: threading.Event = None) -> None:\n if not self.alive:\n if func is None:\n func = get_config_mod(self.config_name)\n self._process = Process(\n target=ProcessManager.run_process,\n args=(\n self.config_name,\n func,\n self._renderable_queue,\n ev,\n ),\n )\n self._process.start()\n self.start_log_queue_handler()\n\n def start_log_queue_handler(self):\n if (\n self.thd_log_queue_handler is not None\n and self.thd_log_queue_handler.is_alive()\n ):\n return\n self.thd_log_queue_handler = threading.Thread(\n target=self._thread_log_queue_handler\n )\n self.thd_log_queue_handler.start()\n\n def stop(self) -> None:\n lock = FileLock(f\"{filepath_config(self.config_name)}.lock\")\n with lock:\n if self.alive:\n self._process.kill()\n self.renderables.append(\n f\"[{self.config_name}] exited. Reason: Manual stop\\n\"\n )\n if self.thd_log_queue_handler is not None:\n self.thd_log_queue_handler.join(timeout=1)\n if self.thd_log_queue_handler.is_alive():\n logger.warning(\n \"Log queue handler thread does not stop within 1 seconds\"\n )\n logger.info(f\"[{self.config_name}] exited\")\n\n def _thread_log_queue_handler(self) -> None:\n while self.alive:\n try:\n log = self._renderable_queue.get(timeout=1)\n except queue.Empty:\n continue\n self.renderables.append(log)\n if len(self.renderables) > self.renderables_max_length:\n self.renderables = self.renderables[self.renderables_reduce_length :]\n logger.info(\"End of log queue handler loop\")\n\n @property\n def alive(self) -> bool:\n if self._process is not None:\n return self._process.is_alive()\n else:\n return False\n\n @property\n def state(self) -> int:\n if self.alive:\n return 1\n elif len(self.renderables) == 0:\n return 2\n else:\n console = Console(no_color=True)\n with console.capture() as capture:\n console.print(self.renderables[-1])\n s = capture.get().strip()\n if s.endswith(\"Reason: Manual stop\"):\n return 2\n elif s.endswith(\"Reason: Finish\"):\n return 2\n elif s.endswith(\"Reason: Update\"):\n return 4\n else:\n return 3\n\n @classmethod\n def get_manager(cls, config_name: str) -> \"ProcessManager\":\n \"\"\"\n Create a new alas if not exists.\n \"\"\"\n if config_name not in cls._processes:\n cls._processes[config_name] = ProcessManager(config_name)\n return cls._processes[config_name]\n\n @staticmethod\n def run_process(\n config_name, func: str, q: queue.Queue, e: threading.Event = None\n ) -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--electron\", action=\"store_true\", help=\"Runs by electron client.\"\n )\n args, _ = parser.parse_known_args()\n State.electron = args.electron\n\n # Setup logger\n 
set_file_logger(name=config_name)\n if State.electron:\n # https://github.com/LmeSzinc/AzurLaneAutoScript/issues/2051\n logger.info(\"Electron detected, remove log output to stdout\")\n from module.logger.logger import console_hdlr\n logger.removeHandler(console_hdlr)\n set_func_logger(func=q.put)\n\n from module.config.config import AzurLaneConfig\n\n AzurLaneConfig.stop_event = e\n try:\n # Run alas\n if func == \"alas\":\n from module.alas import AzurLaneAutoScript\n from aas import ArisuAutoSweeper\n\n if e is not None:\n AzurLaneAutoScript.stop_event = e\n ArisuAutoSweeper(config_name=config_name).loop()\n else:\n logger.critical(f\"No function matched: {func}\")\n logger.info(f\"[{config_name}] exited. Reason: Finish\\n\")\n except Exception as e:\n logger.exception(e)\n\n @classmethod\n def running_instances(cls) -> List[\"ProcessManager\"]:\n l = []\n for process in cls._processes.values():\n if process.alive:\n l.append(process)\n return l\n\n @staticmethod\n def restart_processes(\n instances: List[Union[\"ProcessManager\", str]] = None, ev: threading.Event = None\n ):\n \"\"\"\n After update and reload, or failed to perform an update,\n restart all alas that running before update\n \"\"\"\n logger.hr(\"Restart alas\")\n\n # Load MOD_CONFIG_DICT\n mod_instance()\n\n if instances is None:\n instances = []\n\n _instances = set()\n\n for instance in instances:\n if isinstance(instance, str):\n _instances.add(ProcessManager.get_manager(instance))\n elif isinstance(instance, ProcessManager):\n _instances.add(instance)\n\n try:\n with open(\"./config/reloadalas\", mode=\"r\") as f:\n for line in f.readlines():\n line = line.strip()\n _instances.add(ProcessManager.get_manager(line))\n except FileNotFoundError:\n pass\n\n for process in _instances:\n logger.info(f\"Starting [{process.config_name}]\")\n process.start(func=get_config_mod(process.config_name), ev=ev)\n\n try:\n os.remove(\"./config/reloadalas\")\n except:\n pass\n logger.info(\"Start alas complete\")" }, { "identifier": "RemoteAccess", "path": "module/webui/remote_access.py", "snippet": "class RemoteAccess:\n @staticmethod\n def keep_ssh_alive():\n task_handler: TaskHandler\n task_handler = yield\n while True:\n if _ssh_thread is not None and _ssh_thread.is_alive():\n yield\n continue\n logger.info(\"Remote access service is not running, starting now\")\n try:\n start_remote_access_service()\n except ParseError as e:\n logger.exception(e)\n task_handler.remove_current_task()\n yield\n\n @staticmethod\n def kill_ssh_process():\n if RemoteAccess.is_alive():\n _ssh_process.kill()\n\n @staticmethod\n def is_alive():\n return (\n _ssh_thread is not None\n and _ssh_thread.is_alive()\n and _ssh_process is not None\n and _ssh_process.poll() is None\n )\n\n @staticmethod\n def get_state():\n if RemoteAccess.is_alive():\n if address is not None:\n return 1\n else:\n return 2\n elif _ssh_notfound:\n return 3\n else:\n return 0\n\n @staticmethod\n def get_entry_point():\n return address if RemoteAccess.is_alive() else None" }, { "identifier": "State", "path": "module/webui/setting.py", "snippet": "class State:\n \"\"\"\n Shared settings\n \"\"\"\n\n _init = False\n _clearup = False\n\n restart_event: threading.Event = None\n manager: SyncManager = None\n electron: bool = False\n theme: str = \"default\"\n\n @classmethod\n def init(cls):\n cls.manager = multiprocessing.Manager()\n cls._init = True\n\n @classmethod\n def clearup(cls):\n cls.manager.shutdown()\n cls._clearup = True\n\n @cached_class_property\n def deploy_config(self) -> 
\"DeployConfig\":\n \"\"\"\n Returns:\n DeployConfig:\n \"\"\"\n from module.webui.config import DeployConfig\n\n return DeployConfig()\n\n @cached_class_property\n def config_updater(self) -> \"ConfigUpdater\":\n \"\"\"\n Returns:\n ConfigUpdater:\n \"\"\"\n from module.config.config_updater import ConfigUpdater\n\n return ConfigUpdater()" }, { "identifier": "updater", "path": "module/webui/updater.py", "snippet": "class Updater(DeployConfig, GitManager, PipManager):\n def __init__(self, file=DEPLOY_CONFIG):\n def delay(self):\n def schedule_time(self):\n def execute_output(self, command) -> str:\n def get_commit(self, revision=\"\", n=1, short_sha1=False) -> Tuple:\n def _check_update(self) -> bool:\n def _check_update_(self) -> bool:\n def check_update(self):\n def git_install(self):\n def pip_install(self):\n def update(self):\n def run_update(self):\n def _start_update(self):\n def _wait_update(self, instances: List[ProcessManager], names):\n def _run_update(self, instances, names):\n def _trigger_reload(delay=2):\n def trigger():\n def schedule_update(self) -> Generator:\n def cancel(self):" }, { "identifier": "Icon", "path": "module/webui/utils.py", "snippet": "class Icon:\n \"\"\"\n Storage html of icon.\n \"\"\"\n\n ALAS = _read(filepath_icon(\"alas\"))\n SETTING = _read(filepath_icon(\"setting\"))\n RUN = _read(filepath_icon(\"run\"))\n DEVELOP = _read(filepath_icon(\"develop\"))\n ADD = _read(filepath_icon(\"add\"))" }, { "identifier": "Switch", "path": "module/webui/utils.py", "snippet": "class Switch:\n def __init__(self, status, get_state, name=None):\n \"\"\"\n Args:\n status\n (dict):A dict describes each state.\n {\n 0: {\n 'func': (Callable)\n },\n 1: {\n 'func'\n 'args': (Optional, tuple)\n 'kwargs': (Optional, dict)\n },\n 2: [\n func1,\n {\n 'func': func2\n 'args': args2\n }\n ]\n -1: []\n }\n (Callable):current state will pass into this function\n lambda state: do_update(state=state)\n get_state:\n (Callable):\n return current state\n (Generator):\n yield current state, do nothing when state not in status\n name:\n \"\"\"\n self._lock = threading.Lock()\n self.name = name\n self.status = status\n self.get_state = get_state\n if isinstance(get_state, Generator):\n self._generator = get_state\n elif isinstance(get_state, Callable):\n self._generator = self._get_state()\n\n @staticmethod\n def get_state():\n pass\n\n def _get_state(self):\n \"\"\"\n Predefined generator when `get_state` is an callable\n Customize it if you have multiple criteria on state\n \"\"\"\n _status = self.get_state()\n yield _status\n while True:\n status = self.get_state()\n if _status != status:\n _status = status\n yield _status\n continue\n yield -1\n\n def switch(self):\n with self._lock:\n r = next(self._generator)\n if callable(self.status):\n self.status(r)\n elif r in self.status:\n f = self.status[r]\n if isinstance(f, (dict, Callable)):\n f = [f]\n for d in f:\n if isinstance(d, Callable):\n d = {\"func\": d}\n func = d[\"func\"]\n args = d.get(\"args\", tuple())\n kwargs = d.get(\"kwargs\", dict())\n func(*args, **kwargs)\n\n def g(self) -> Generator:\n g = get_generator(self.switch)\n if self.name:\n name = self.name\n else:\n name = self.get_state.__name__\n g.__name__ = f\"Switch_{name}_refresh\"\n return g" }, { "identifier": "TaskHandler", "path": "module/webui/utils.py", "snippet": "class TaskHandler:\n def __init__(self) -> None:\n # List of background running task\n self.tasks: List[Task] = []\n # List of task name to be removed\n self.pending_remove_tasks: List[Task] = []\n # 
Running task\n self._task = None\n # Task running thread\n self._thread: threading.Thread = None\n self._alive = False\n self._lock = threading.Lock()\n\n def add(self, func, delay: float, pending_delete: bool = False) -> None:\n \"\"\"\n Add a task running background.\n Another way of `self.add_task()`.\n func: Callable or Generator\n \"\"\"\n if isinstance(func, Callable):\n g = get_generator(func)\n elif isinstance(func, Generator):\n g = func\n self.add_task(Task(g, delay), pending_delete=pending_delete)\n\n def add_task(self, task: Task, pending_delete: bool = False) -> None:\n \"\"\"\n Add a task running background.\n \"\"\"\n if task in self.tasks:\n logger.warning(f\"Task {task} already in tasks list.\")\n return\n logger.info(f\"Add task {task}\")\n with self._lock:\n self.tasks.append(task)\n if pending_delete:\n self.pending_remove_tasks.append(task)\n\n def _remove_task(self, task: Task) -> None:\n if task in self.tasks:\n self.tasks.remove(task)\n logger.info(f\"Task {task} removed.\")\n else:\n logger.warning(\n f\"Failed to remove task {task}. Current tasks list: {self.tasks}\"\n )\n\n def remove_task(self, task: Task, nowait: bool = False) -> None:\n \"\"\"\n Remove a task in `self.tasks`.\n Args:\n task:\n nowait: if True, remove it right now,\n otherwise remove when call `self.remove_pending_task`\n \"\"\"\n if nowait:\n with self._lock:\n self._remove_task(task)\n else:\n self.pending_remove_tasks.append(task)\n\n def remove_pending_task(self) -> None:\n \"\"\"\n Remove all pending remove tasks.\n \"\"\"\n with self._lock:\n for task in self.pending_remove_tasks:\n self._remove_task(task)\n self.pending_remove_tasks = []\n\n def remove_current_task(self) -> None:\n self.remove_task(self._task, nowait=True)\n\n def get_task(self, name) -> Task:\n with self._lock:\n for task in self.tasks:\n if task.name == name:\n return task\n return None\n\n def loop(self) -> None:\n \"\"\"\n Start task loop.\n You **should** run this function in an individual thread.\n \"\"\"\n self._alive = True\n while self._alive:\n if self.tasks:\n with self._lock:\n self.tasks.sort(key=operator.attrgetter(\"next_run\"))\n task = self.tasks[0]\n if task.next_run < time.time():\n start_time = time.time()\n try:\n self._task = task\n # logger.debug(f'Start task {task.g.__name__}')\n task.send(self)\n # logger.debug(f'End task {task.g.__name__}')\n except Exception as e:\n logger.exception(e)\n self.remove_task(task, nowait=True)\n finally:\n self._task = None\n end_time = time.time()\n task.next_run += task.delay\n with self._lock:\n for task in self.tasks:\n task.next_run += end_time - start_time\n else:\n time.sleep(0.05)\n else:\n time.sleep(0.5)\n logger.info(\"End of task handler loop\")\n\n def _get_thread(self) -> threading.Thread:\n thread = threading.Thread(target=self.loop, daemon=True)\n return thread\n\n def start(self) -> None:\n \"\"\"\n Start task handler.\n \"\"\"\n logger.info(\"Start task handler\")\n if self._thread is not None and self._thread.is_alive():\n logger.warning(\"Task handler already running!\")\n return\n self._thread = self._get_thread()\n self._thread.start()\n\n def stop(self) -> None:\n self.remove_pending_task()\n self._alive = False\n self._thread.join(timeout=2)\n if not self._thread.is_alive():\n logger.info(\"Finish task handler\")\n else:\n logger.warning(\"Task handler does not stop within 2 seconds\")" }, { "identifier": "add_css", "path": "module/webui/utils.py", "snippet": "def add_css(filepath):\n with open(filepath, \"r\") as f:\n css = 
f.read().replace(\"\\n\", \"\")\n run_js(f\"\"\"$('head').append('<style>{css}</style>')\"\"\")" }, { "identifier": "filepath_css", "path": "module/webui/utils.py", "snippet": "def filepath_css(filename):\n return f\"./assets/gui/css/{filename}.css\"" }, { "identifier": "get_alas_config_listen_path", "path": "module/webui/utils.py", "snippet": "def get_alas_config_listen_path(args):\n for path, d in deep_iter(args, depth=3):\n if d.get(\"display\") in [\"readonly\", \"hide\"]:\n continue\n yield path" }, { "identifier": "get_localstorage", "path": "module/webui/utils.py", "snippet": "def get_localstorage(key):\n return eval_js(\"localStorage.getItem(key)\", key=key)" }, { "identifier": "get_window_visibility_state", "path": "module/webui/utils.py", "snippet": "def get_window_visibility_state():\n ret = eval_js(\"document.visibilityState\")\n return False if ret == \"hidden\" else True" }, { "identifier": "login", "path": "module/webui/utils.py", "snippet": "def login(password):\n if get_localstorage(\"password\") == str(password):\n return True\n pwd = input(label=\"Please login below.\", type=PASSWORD, placeholder=\"PASSWORD\")\n if str(pwd) == str(password):\n set_localstorage(\"password\", str(pwd))\n return True\n else:\n toast(\"Wrong password!\", color=\"error\")\n return False" }, { "identifier": "parse_pin_value", "path": "module/webui/utils.py", "snippet": "def parse_pin_value(val, valuetype: str = None):\n \"\"\"\n input, textarea return str\n select return its option (str or int)\n checkbox return [] or [True] (define in put_checkbox_)\n \"\"\"\n if isinstance(val, list):\n if len(val) == 0:\n return False\n else:\n return True\n elif valuetype:\n return str2type[valuetype](val)\n elif isinstance(val, (int, float)):\n return val\n else:\n try:\n v = float(val)\n except ValueError:\n return val\n if v.is_integer():\n return int(v)\n else:\n return v" }, { "identifier": "raise_exception", "path": "module/webui/utils.py", "snippet": "def raise_exception(x=3):\n \"\"\"\n For testing purpose\n \"\"\"\n if x > 0:\n raise_exception(x - 1)\n else:\n raise Exception(\"quq\")" }, { "identifier": "re_fullmatch", "path": "module/webui/utils.py", "snippet": "def re_fullmatch(pattern, string):\n if pattern == \"datetime\":\n try:\n datetime.datetime.fromisoformat(string)\n return True\n except ValueError:\n return False\n # elif:\n return re.fullmatch(pattern=pattern, string=string)" }, { "identifier": "BinarySwitchButton", "path": "module/webui/widgets.py", "snippet": "class ScrollableCode:\nclass RichLog:\nclass BinarySwitchButton(Switch):\n def __init__(self, keep_bottom: bool = True) -> None:\n def output(self):\n def append(self, text: str) -> None:\n def scroll(self) -> None:\n def reset(self) -> None:\n def set_scroll(self, b: bool) -> None:\n def __init__(self, scope, font_width=\"0.559\") -> None:\n def render(self, renderable: ConsoleRenderable) -> str:\n def extend(self, text):\n def reset(self):\n def scroll(self) -> None:\n def set_scroll(self, b: bool) -> None:\n def get_width(self):\n def put_log(self, pm: ProcessManager) -> Generator:\n def __init__(\n self,\n get_state,\n label_on,\n label_off,\n onclick_on,\n onclick_off,\n scope,\n color_on=\"success\",\n color_off=\"secondary\",\n ):\n def update_button(self, label, onclick, color):\ndef put_icon_buttons(\n icon_html: str,\n buttons: List[Dict[str, str]],\n onclick: Union[List[Callable[[], None]], Callable[[], None]],\n) -> Output:\ndef put_none() -> Output:\ndef get_title_help(kwargs: T_Output_Kwargs) -> Output:\ndef 
put_arg_input(kwargs: T_Output_Kwargs) -> Output:\ndef product_stored_row(kwargs: T_Output_Kwargs, key, value):\ndef put_arg_stored(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_select(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_state(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_textarea(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_checkbox(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_datetime(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_storage(kwargs: T_Output_Kwargs) -> Optional[Output]:\n def clear_callback():\ndef put_output(output_kwargs: T_Output_Kwargs) -> Optional[Output]:\ndef get_loading_style(shape: str, fill: bool) -> str:\ndef put_loading_text(\n text: str,\n shape: str = \"border\",\n color: str = \"dark\",\n fill: bool = False,\n size: str = \"auto 2px 1fr\",\n):" } ]
import argparse import queue import threading import time import module.webui.lang as lang from datetime import datetime from functools import partial from typing import Dict, List, Optional from pywebio import config as webconfig from pywebio.output import ( Output, clear, close_popup, popup, put_button, put_buttons, put_collapse, put_column, put_error, put_html, put_link, put_loading, put_markdown, put_row, put_scope, put_table, put_text, put_warning, toast, use_scope, ) from pywebio.pin import pin, pin_on_change from pywebio.session import go_app, info, local, register_thread, run_js, set_env from module.config.config import AzurLaneConfig, Function from module.config.utils import ( alas_instance, alas_template, deep_get, deep_iter, deep_set, dict_to_kv, filepath_args, filepath_config, read_file, ) from module.logger import logger from module.webui.base import Frame from module.webui.fake import ( get_config_mod, load_config, ) from module.webui.fastapi import asgi_app from module.webui.lang import _t, t from module.webui.pin import put_input, put_select from module.webui.process_manager import ProcessManager from module.webui.remote_access import RemoteAccess from module.webui.setting import State from module.webui.updater import updater from module.webui.utils import ( Icon, Switch, TaskHandler, add_css, filepath_css, get_alas_config_listen_path, get_localstorage, get_window_visibility_state, login, parse_pin_value, raise_exception, re_fullmatch, ) from module.webui.widgets import ( BinarySwitchButton, RichLog, T_Output_Kwargs, put_icon_buttons, put_loading_text, put_none, put_output, )
14,878
[commit for commit in history], header=[ "SHA1", t("Gui.Update.Author"), t("Gui.Update.Time"), t("Gui.Update.Message"), ], ) def u(state): if state == -1: return clear("updater_loading") clear("updater_state") clear("updater_btn") if state == 0: put_loading("border", "secondary", "updater_loading").style( "--loading-border-fill--" ) put_text(t("Gui.Update.UpToDate"), scope="updater_state") put_button( t("Gui.Button.CheckUpdate"), onclick=updater.check_update, color="info", scope="updater_btn", ) update_table() elif state == 1: put_loading("grow", "success", "updater_loading").style( "--loading-grow--" ) put_text(t("Gui.Update.HaveUpdate"), scope="updater_state") put_button( t("Gui.Button.ClickToUpdate"), onclick=updater.run_update, color="success", scope="updater_btn", ) update_table() elif state == "checking": put_loading("border", "primary", "updater_loading").style( "--loading-border--" ) put_text(t("Gui.Update.UpdateChecking"), scope="updater_state") elif state == "failed": put_loading("grow", "danger", "updater_loading").style( "--loading-grow--" ) put_text(t("Gui.Update.UpdateFailed"), scope="updater_state") put_button( t("Gui.Button.RetryUpdate"), onclick=updater.run_update, color="primary", scope="updater_btn", ) elif state == "start": put_loading("border", "primary", "updater_loading").style( "--loading-border--" ) put_text(t("Gui.Update.UpdateStart"), scope="updater_state") put_button( t("Gui.Button.CancelUpdate"), onclick=updater.cancel, color="danger", scope="updater_btn", ) elif state == "wait": put_loading("border", "primary", "updater_loading").style( "--loading-border--" ) put_text(t("Gui.Update.UpdateWait"), scope="updater_state") put_button( t("Gui.Button.CancelUpdate"), onclick=updater.cancel, color="danger", scope="updater_btn", ) elif state == "run update": put_loading("border", "primary", "updater_loading").style( "--loading-border--" ) put_text(t("Gui.Update.UpdateRun"), scope="updater_state") put_button( t("Gui.Button.CancelUpdate"), onclick=updater.cancel, color="danger", scope="updater_btn", disabled=True, ) elif state == "reload": put_loading("grow", "success", "updater_loading").style( "--loading-grow--" ) put_text(t("Gui.Update.UpdateSuccess"), scope="updater_state") update_table() elif state == "finish": put_loading("grow", "success", "updater_loading").style( "--loading-grow--" ) put_text(t("Gui.Update.UpdateFinish"), scope="updater_state") update_table() elif state == "cancel": put_loading("border", "danger", "updater_loading").style( "--loading-border--" ) put_text(t("Gui.Update.UpdateCancel"), scope="updater_state") put_button( t("Gui.Button.CancelUpdate"), onclick=updater.cancel, color="danger", scope="updater_btn", disabled=True, ) else: put_text( "Something went wrong, please contact develops", scope="updater_state", ) put_text(f"state: {state}", scope="updater_state")
task_handler = TaskHandler() class AlasGUI(Frame): ALAS_MENU: Dict[str, Dict[str, List[str]]] ALAS_ARGS: Dict[str, Dict[str, Dict[str, Dict[str, str]]]] ALAS_STORED: Dict[str, Dict[str, Dict[str, str]]] theme = "default" def initial(self) -> None: self.ALAS_MENU = read_file(filepath_args("menu", self.alas_mod)) self.ALAS_ARGS = read_file(filepath_args("args", self.alas_mod)) self.ALAS_STORED = read_file(filepath_args("stored", self.alas_mod)) self._init_alas_config_watcher() def __init__(self) -> None: super().__init__() # modified keys, return values of pin_wait_change() self.modified_config_queue = queue.Queue() # alas config name self.alas_name = "" self.alas_mod = "alas" self.alas_config = AzurLaneConfig("template") self.initial() @use_scope("aside", clear=True) def set_aside(self) -> None: # TODO: update put_icon_buttons() put_icon_buttons( Icon.DEVELOP, buttons=[ {"label": t("Gui.Aside.Home"), "value": "Home", "color": "aside"} ], onclick=[self.ui_develop], ), for name in alas_instance(): put_icon_buttons( Icon.RUN, buttons=[{"label": name, "value": name, "color": "aside"}], onclick=self.ui_alas, ) put_icon_buttons( Icon.ADD, buttons=[ {"label": t("Gui.Aside.AddAlas"), "value": "AddAlas", "color": "aside"} ], onclick=[self.ui_add_alas], ), @use_scope("header_status") def set_status(self, state: int) -> None: """ Args: state (int): 1 (running) 2 (not running) 3 (warning, stop unexpectedly) 4 (stop for update) 0 (hide) -1 (*state not changed) """ if state == -1: return clear() if state == 1: put_loading_text(t("Gui.Status.Running"), color="success") elif state == 2: put_loading_text(t("Gui.Status.Inactive"), color="secondary", fill=True) elif state == 3: put_loading_text(t("Gui.Status.Warning"), shape="grow", color="warning") elif state == 4: put_loading_text(t("Gui.Status.Updating"), shape="grow", color="success") @classmethod def set_theme(cls, theme="default") -> None: cls.theme = theme State.deploy_config.Theme = theme State.theme = theme webconfig(theme=theme) @use_scope("menu", clear=True) def alas_set_menu(self) -> None: """ Set menu """ put_buttons( [{ "label": t("Gui.MenuAlas.Overview"), "value": "Overview", "color": "menu", }], onclick=[self.alas_overview], ).style(f"--menu-Overview--") for menu, task_data in self.ALAS_MENU.items(): if task_data.get("page") == "tool": _onclick = self.alas_daemon_overview else: _onclick = self.alas_set_group if task_data.get("menu") == "collapse": task_btn_list = [ put_buttons( [{ "label": t(f"Task.{task}.name"), "value": task, "color": "menu", }], onclick=_onclick, ).style(f"--menu-{task}--") for task in task_data.get("tasks", []) ] put_collapse(title=t(f"Menu.{menu}.name"), content=task_btn_list) else: title = t(f"Menu.{menu}.name") put_html('<div class="hr-task-group-box">' '<span class="hr-task-group-line"></span>' f'<span class="hr-task-group-text">{title}</span>' '<span class="hr-task-group-line"></span>' '</div>' ) for task in task_data.get("tasks", []): put_buttons( [{ "label": t(f"Task.{task}.name"), "value": task, "color": "menu", }], onclick=_onclick, ).style(f"--menu-{task}--").style(f"padding-left: 0.75rem") self.alas_overview() @use_scope("content", clear=True) def alas_set_group(self, task: str) -> None: """ Set arg groups from dict """ self.init_menu(name=task) self.set_title(t(f"Task.{task}.name")) put_scope("_groups", [put_none(), put_scope("groups"), put_scope("navigator")]) task_help: str = t(f"Task.{task}.help") if task_help: put_scope( "group__info", scope="groups", content=[put_text(task_help).style("font-size: 1rem")], 
) config = self.alas_config.read_file(self.alas_name) for group, arg_dict in deep_iter(self.ALAS_ARGS[task], depth=1): if self.set_group(group, arg_dict, config, task): self.set_navigator(group) @use_scope("groups") def set_group(self, group, arg_dict, config, task): group_name = group[0] output_list: List[Output] = [] for arg, arg_dict in deep_iter(arg_dict, depth=1): output_kwargs: T_Output_Kwargs = arg_dict.copy() # Skip hide display: Optional[str] = output_kwargs.pop("display", None) if display == "hide": continue # Disable elif display == "disabled": output_kwargs["disabled"] = True # Output type output_kwargs["widget_type"] = output_kwargs.pop("type") arg_name = arg[0] # [arg_name,] # Internal pin widget name output_kwargs["name"] = f"{task}_{group_name}_{arg_name}" # Display title output_kwargs["title"] = t(f"{group_name}.{arg_name}.name") # Get value from config value = deep_get( config, [task, group_name, arg_name], output_kwargs["value"] ) # idk value = str(value) if isinstance(value, datetime) else value # Default value output_kwargs["value"] = value # Options output_kwargs["options"] = options = output_kwargs.pop("option", []) # Options label options_label = [] for opt in options: options_label.append(t(f"{group_name}.{arg_name}.{opt}")) output_kwargs["options_label"] = options_label # Help arg_help = t(f"{group_name}.{arg_name}.help") if arg_help == "" or not arg_help: arg_help = None output_kwargs["help"] = arg_help # Invalid feedback output_kwargs["invalid_feedback"] = t("Gui.Text.InvalidFeedBack", value) o = put_output(output_kwargs) if o is not None: # output will inherit current scope when created, override here o.spec["scope"] = f"#pywebio-scope-group_{group_name}" output_list.append(o) if not output_list: return 0 with use_scope(f"group_{group_name}"): put_text(t(f"{group_name}._info.name")) group_help = t(f"{group_name}._info.help") if group_help != "": put_text(group_help) put_html('<hr class="hr-group">') for output in output_list: output.show() return len(output_list) @use_scope("navigator") def set_navigator(self, group): js = f""" $("#pywebio-scope-groups").scrollTop( $("#pywebio-scope-group_{group[0]}").position().top + $("#pywebio-scope-groups").scrollTop() - 59 ) """ put_button( label=t(f"{group[0]}._info.name"), onclick=lambda: run_js(js), color="navigator", ) def set_dashboard(self, arg, arg_dict, config): i18n = arg_dict.get('i18n') if i18n: name = t(i18n) else: name = arg color = arg_dict.get("color", "#777777") nodata = t("Gui.Dashboard.NoData") def set_value(dic): if "total" in dic.get("attrs", []) and config.get("total") is not None: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), put_text(f' / {config.get("total", "")}').style("--dashboard-time--"), ] else: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), ] with use_scope(f"dashboard-row-{arg}", clear=True): put_html(f'<div><div class="dashboard-icon" style="background-color:{color}"></div>'), put_scope(f"dashboard-content-{arg}", [ put_scope(f"dashboard-value-{arg}", set_value(arg_dict)), put_scope(f"dashboard-time-{arg}", [ put_text(f"{name} - {lang.readable_time(config.get('time', ''))}").style("--dashboard-time--"), ]) ]) @use_scope("content", clear=True) def alas_overview(self) -> None: self.init_menu(name="Overview") self.set_title(t(f"Gui.MenuAlas.Overview")) put_scope("overview", [put_scope("schedulers"), put_scope("logs")]) with use_scope("schedulers"): put_scope( "scheduler-bar", [ put_text(t("Gui.Overview.Scheduler")).style( "font-size: 
1.25rem; margin: auto .5rem auto;" ), put_scope("scheduler_btn"), ], ) put_scope( "running", [ put_text(t("Gui.Overview.Running")), put_html('<hr class="hr-group">'), put_scope("running_tasks"), ], ) put_scope( "pending", [ put_text(t("Gui.Overview.Pending")), put_html('<hr class="hr-group">'), put_scope("pending_tasks"), ], ) put_scope( "waiting", [ put_text(t("Gui.Overview.Waiting")), put_html('<hr class="hr-group">'), put_scope("waiting_tasks"), ], ) switch_scheduler = BinarySwitchButton( label_on=t("Gui.Button.Stop"), label_off=t("Gui.Button.Start"), onclick_on=lambda: self.alas.stop(), onclick_off=lambda: self.alas.start(None, updater.event), get_state=lambda: self.alas.alive, color_on="off", color_off="on", scope="scheduler_btn", ) log = RichLog("log") with use_scope("logs"): put_scope("log-bar", [ put_scope("log-title", [ put_text(t("Gui.Overview.Log")).style("font-size: 1.25rem; margin: auto .5rem auto;"), put_scope("log-title-btns", [ put_scope("log_scroll_btn"), ]), ]), put_html('<hr class="hr-group">'), put_scope("dashboard", [ # Empty dashboard, values will be updated in alas_update_overview_task() put_scope(f"dashboard-row-{arg}", []) for arg in self.ALAS_STORED.keys() if deep_get(self.ALAS_STORED, keys=[arg, "order"], default=0) # Empty content to left-align last row ] + [put_html("<i></i>")] * min(len(self.ALAS_STORED), 4)) ]) put_scope("log", [put_html("")]) log.console.width = log.get_width() switch_log_scroll = BinarySwitchButton( label_on=t("Gui.Button.ScrollON"), label_off=t("Gui.Button.ScrollOFF"), onclick_on=lambda: log.set_scroll(False), onclick_off=lambda: log.set_scroll(True), get_state=lambda: log.keep_bottom, color_on="on", color_off="off", scope="log_scroll_btn", ) self.task_handler.add(switch_scheduler.g(), 1, True) self.task_handler.add(switch_log_scroll.g(), 1, True) self.task_handler.add(self.alas_update_overview_task, 10, True) self.task_handler.add(log.put_log(self.alas), 0.25, True) def _init_alas_config_watcher(self) -> None: def put_queue(path, value): self.modified_config_queue.put({"name": path, "value": value}) for path in get_alas_config_listen_path(self.ALAS_ARGS): pin_on_change( name="_".join(path), onchange=partial(put_queue, ".".join(path)) ) logger.info("Init config watcher done.") def _alas_thread_update_config(self) -> None: modified = {} while self.alive: try: d = self.modified_config_queue.get(timeout=10) config_name = self.alas_name read = self.alas_config.read_file write = self.alas_config.write_file except queue.Empty: continue modified[d["name"]] = d["value"] while True: try: d = self.modified_config_queue.get(timeout=1) modified[d["name"]] = d["value"] except queue.Empty: self._save_config(modified, config_name, read, write) modified.clear() break def _save_config( self, modified: Dict[str, str], config_name: str, read=State.config_updater.read_file, write=State.config_updater.write_file, ) -> None: try: valid = [] invalid = [] config = read(config_name) for k, v in modified.copy().items(): valuetype = deep_get(self.ALAS_ARGS, k + ".valuetype") v = parse_pin_value(v, valuetype) validate = deep_get(self.ALAS_ARGS, k + ".validate") if not len(str(v)): default = deep_get(self.ALAS_ARGS, k + ".value") modified[k] = default deep_set(config, k, default) valid.append(k) pin["_".join(k.split("."))] = default elif not validate or re_fullmatch(validate, v): deep_set(config, k, v) modified[k] = v valid.append(k) # update Emotion Record if Emotion Value is changed if "Emotion" in k and "Value" in k: k = k.split(".") k[-1] = k[-1].replace("Value", 
"Record") k = ".".join(k) v = datetime.now().strftime("%Y-%m-%d %H:%M:%S") modified[k] = v deep_set(config, k, v) valid.append(k) pin["_".join(k.split("."))] = v else: modified.pop(k) invalid.append(k) logger.warning(f"Invalid value {v} for key {k}, skip saving.") self.pin_remove_invalid_mark(valid) self.pin_set_invalid_mark(invalid) if modified: toast( t("Gui.Toast.ConfigSaved"), duration=1, position="right", color="success", ) logger.info( f"Save config {filepath_config(config_name)}, {dict_to_kv(modified)}" ) write(config_name, config) except Exception as e: logger.exception(e) def alas_update_overview_task(self) -> None: if not self.visible: return self.alas_config.load() self.alas_config.get_next_task() alive = self.alas.alive if len(self.alas_config.pending_task) >= 1: if self.alas.alive: running = self.alas_config.pending_task[:1] pending = self.alas_config.pending_task[1:] else: running = [] pending = self.alas_config.pending_task[:] else: running = [] pending = [] waiting = self.alas_config.waiting_task def put_task(func: Function): with use_scope(f"overview-task_{func.command}"): put_column( [ put_text(t(f"Task.{func.command}.name")).style("--arg-title--"), put_text(str(func.next_run)).style("--arg-help--"), ], size="auto auto", ) put_button( label=t("Gui.Button.Setting"), onclick=lambda: self.alas_set_group(func.command), color="off", ) if self.scope_expired_then_add("pending_task", [ alive, self.alas_config.pending_task ]): clear("running_tasks") clear("pending_tasks") clear("waiting_tasks") with use_scope("running_tasks"): if running: for task in running: put_task(task) else: put_text(t("Gui.Overview.NoTask")).style("--overview-notask-text--") with use_scope("pending_tasks"): if pending: for task in pending: put_task(task) else: put_text(t("Gui.Overview.NoTask")).style("--overview-notask-text--") with use_scope("waiting_tasks"): if waiting: for task in waiting: put_task(task) else: put_text(t("Gui.Overview.NoTask")).style("--overview-notask-text--") for arg, arg_dict in self.ALAS_STORED.items(): # Skip order=0 if not arg_dict.get("order", 0): continue path = arg_dict["path"] if self.scope_expired_then_add(f"dashboard-time-value-{arg}", [ deep_get(self.alas_config.data, keys=f"{path}.value"), lang.readable_time(deep_get(self.alas_config.data, keys=f"{path}.time")), ]): self.set_dashboard(arg, arg_dict, deep_get(self.alas_config.data, keys=path, default={})) @use_scope("content", clear=True) def alas_daemon_overview(self, task: str) -> None: self.init_menu(name=task) self.set_title(t(f"Task.{task}.name")) log = RichLog("log") if self.is_mobile: put_scope( "daemon-overview", [ put_scope("scheduler-bar"), put_scope("groups"), put_scope("log-bar"), put_scope("log", [put_html("")]), ], ) else: put_scope( "daemon-overview", [ put_none(), put_scope( "_daemon", [ put_scope( "_daemon_upper", [put_scope("scheduler-bar"), put_scope("log-bar")], ), put_scope("groups"), put_scope("log", [put_html("")]), ], ), put_none(), ], ) log.console.width = log.get_width() with use_scope("scheduler-bar"): put_text(t("Gui.Overview.Scheduler")).style( "font-size: 1.25rem; margin: auto .5rem auto;" ) put_scope("scheduler_btn") switch_scheduler = BinarySwitchButton( label_on=t("Gui.Button.Stop"), label_off=t("Gui.Button.Start"), onclick_on=lambda: self.alas.stop(), onclick_off=lambda: self.alas.start(task), get_state=lambda: self.alas.alive, color_on="off", color_off="on", scope="scheduler_btn", ) with use_scope("log-bar"): put_text(t("Gui.Overview.Log")).style( "font-size: 1.25rem; margin: auto .5rem 
auto;" ) put_scope( "log-bar-btns", [ put_scope("log_scroll_btn"), ], ) switch_log_scroll = BinarySwitchButton( label_on=t("Gui.Button.ScrollON"), label_off=t("Gui.Button.ScrollOFF"), onclick_on=lambda: log.set_scroll(False), onclick_off=lambda: log.set_scroll(True), get_state=lambda: log.keep_bottom, color_on="on", color_off="off", scope="log_scroll_btn", ) config = self.alas_config.read_file(self.alas_name) for group, arg_dict in deep_iter(self.ALAS_ARGS[task], depth=1): if group[0] == "Storage": continue self.set_group(group, arg_dict, config, task) run_js(""" $("#pywebio-scope-log").css( "grid-row-start", -2 - $("#pywebio-scope-_daemon").children().filter( function(){ return $(this).css("display") === "none"; } ).length ); $("#pywebio-scope-log").css( "grid-row-end", -1 ); """) self.task_handler.add(switch_scheduler.g(), 1, True) self.task_handler.add(switch_log_scroll.g(), 1, True) self.task_handler.add(log.put_log(self.alas), 0.25, True) @use_scope("menu", clear=True) def dev_set_menu(self) -> None: self.init_menu(collapse_menu=False, name="Develop") put_button( label=t("Gui.MenuDevelop.HomePage"), onclick=self.show, color="menu", ).style(f"--menu-HomePage--") # put_button( # label=t("Gui.MenuDevelop.Translate"), # onclick=self.dev_translate, # color="menu", # ).style(f"--menu-Translate--") put_button( label=t("Gui.MenuDevelop.Update"), onclick=self.dev_update, color="menu", ).style(f"--menu-Update--") put_button( label=t("Gui.MenuDevelop.Remote"), onclick=self.dev_remote, color="menu", ).style(f"--menu-Remote--") put_button( label=t("Gui.MenuDevelop.Utils"), onclick=self.dev_utils, color="menu", ).style(f"--menu-Utils--") def dev_translate(self) -> None: go_app("translate", new_window=True) lang.TRANSLATE_MODE = True self.show() @use_scope("content", clear=True) def dev_update(self) -> None: self.init_menu(name="Update") self.set_title(t("Gui.MenuDevelop.Update")) if State.restart_event is None: put_warning(t("Gui.Update.DisabledWarn")) put_row( content=[put_scope("updater_loading"), None, put_scope("updater_state")], size="auto .25rem 1fr", ) put_scope("updater_btn") put_scope("updater_info") def update_table(): with use_scope("updater_info", clear=True): local_commit = updater.get_commit(short_sha1=True) upstream_commit = updater.get_commit( f"origin/{updater.Branch}", short_sha1=True ) put_table( [ [t("Gui.Update.Local"), *local_commit], [t("Gui.Update.Upstream"), *upstream_commit], ], header=[ "", "SHA1", t("Gui.Update.Author"), t("Gui.Update.Time"), t("Gui.Update.Message"), ], ) with use_scope("updater_detail", clear=True): put_text(t("Gui.Update.DetailedHistory")) history = updater.get_commit( f"origin/{updater.Branch}", n=20, short_sha1=True ) put_table( [commit for commit in history], header=[ "SHA1", t("Gui.Update.Author"), t("Gui.Update.Time"), t("Gui.Update.Message"), ], ) def u(state): if state == -1: return clear("updater_loading") clear("updater_state") clear("updater_btn") if state == 0: put_loading("border", "secondary", "updater_loading").style( "--loading-border-fill--" ) put_text(t("Gui.Update.UpToDate"), scope="updater_state") put_button( t("Gui.Button.CheckUpdate"), onclick=updater.check_update, color="info", scope="updater_btn", ) update_table() elif state == 1: put_loading("grow", "success", "updater_loading").style( "--loading-grow--" ) put_text(t("Gui.Update.HaveUpdate"), scope="updater_state") put_button( t("Gui.Button.ClickToUpdate"), onclick=updater.run_update, color="success", scope="updater_btn", ) update_table() elif state == "checking": 
put_loading("border", "primary", "updater_loading").style( "--loading-border--" ) put_text(t("Gui.Update.UpdateChecking"), scope="updater_state") elif state == "failed": put_loading("grow", "danger", "updater_loading").style( "--loading-grow--" ) put_text(t("Gui.Update.UpdateFailed"), scope="updater_state") put_button( t("Gui.Button.RetryUpdate"), onclick=updater.run_update, color="primary", scope="updater_btn", ) elif state == "start": put_loading("border", "primary", "updater_loading").style( "--loading-border--" ) put_text(t("Gui.Update.UpdateStart"), scope="updater_state") put_button( t("Gui.Button.CancelUpdate"), onclick=updater.cancel, color="danger", scope="updater_btn", ) elif state == "wait": put_loading("border", "primary", "updater_loading").style( "--loading-border--" ) put_text(t("Gui.Update.UpdateWait"), scope="updater_state") put_button( t("Gui.Button.CancelUpdate"), onclick=updater.cancel, color="danger", scope="updater_btn", ) elif state == "run update": put_loading("border", "primary", "updater_loading").style( "--loading-border--" ) put_text(t("Gui.Update.UpdateRun"), scope="updater_state") put_button( t("Gui.Button.CancelUpdate"), onclick=updater.cancel, color="danger", scope="updater_btn", disabled=True, ) elif state == "reload": put_loading("grow", "success", "updater_loading").style( "--loading-grow--" ) put_text(t("Gui.Update.UpdateSuccess"), scope="updater_state") update_table() elif state == "finish": put_loading("grow", "success", "updater_loading").style( "--loading-grow--" ) put_text(t("Gui.Update.UpdateFinish"), scope="updater_state") update_table() elif state == "cancel": put_loading("border", "danger", "updater_loading").style( "--loading-border--" ) put_text(t("Gui.Update.UpdateCancel"), scope="updater_state") put_button( t("Gui.Button.CancelUpdate"), onclick=updater.cancel, color="danger", scope="updater_btn", disabled=True, ) else: put_text( "Something went wrong, please contact develops", scope="updater_state", ) put_text(f"state: {state}", scope="updater_state")
updater_switch = Switch(
25
2023-11-01 07:09:45+00:00
24k
radekd91/inferno
inferno/datasets/FaceVideoDataModule.py
[ { "identifier": "TestData", "path": "inferno/datasets/ImageTestDataset.py", "snippet": "class TestData(Dataset):\n def __init__(self, testpath, iscrop=True, crop_size=224, scale=1.25, face_detector='fan',\n scaling_factor=1.0, max_detection=None):\n self.max_detection = max_detection\n if isinstance(testpath, list):\n self.imagepath_list = testpath\n elif os.path.isdir(testpath):\n self.imagepath_list = glob(testpath + '/*.jpg') + glob(testpath + '/*.png') + glob(testpath + '/*.bmp')\n elif os.path.isfile(testpath) and (testpath[-3:] in ['jpg', 'png', 'bmp']):\n self.imagepath_list = [testpath]\n elif os.path.isfile(testpath) and (testpath[-3:] in ['mp4', 'csv', 'vid', 'ebm']):\n self.imagepath_list = video2sequence(testpath)\n else:\n print(f'please check the test path: {testpath}')\n exit()\n print('total {} images'.format(len(self.imagepath_list)))\n self.imagepath_list = sorted(self.imagepath_list)\n self.scaling_factor = scaling_factor\n self.crop_size = crop_size\n self.scale = scale\n self.iscrop = iscrop\n self.resolution_inp = crop_size\n # add_pretrained_deca_to_path()\n # from decalib.datasets import detectors\n if face_detector == 'fan':\n self.face_detector = FAN()\n # elif face_detector == 'mtcnn':\n # self.face_detector = detectors.MTCNN()\n else:\n print(f'please check the detector: {face_detector}')\n exit()\n\n def __len__(self):\n return len(self.imagepath_list)\n\n def __getitem__(self, index):\n imagepath = str(self.imagepath_list[index])\n imagename = imagepath.split('/')[-1].split('.')[0]\n\n image = np.array(imread(imagepath))\n if len(image.shape) == 2:\n image = image[:, :, None].repeat(1, 1, 3)\n if len(image.shape) == 3 and image.shape[2] > 3:\n image = image[:, :, :3]\n\n if self.scaling_factor != 1.:\n image = rescale(image, (self.scaling_factor, self.scaling_factor, 1))*255.\n\n h, w, _ = image.shape\n if self.iscrop:\n # provide kpt as txt file, or mat file (for AFLW2000)\n kpt_matpath = imagepath.replace('.jpg', '.mat').replace('.png', '.mat')\n kpt_txtpath = imagepath.replace('.jpg', '.txt').replace('.png', '.txt')\n if os.path.exists(kpt_matpath):\n kpt = scipy.io.loadmat(kpt_matpath)['pt3d_68'].T\n left = np.min(kpt[:, 0])\n right = np.max(kpt[:, 0])\n top = np.min(kpt[:, 1])\n bottom = np.max(kpt[:, 1])\n old_size, center = bbox2point(left, right, top, bottom, type='kpt68')\n elif os.path.exists(kpt_txtpath):\n kpt = np.loadtxt(kpt_txtpath)\n left = np.min(kpt[:, 0])\n right = np.max(kpt[:, 0])\n top = np.min(kpt[:, 1])\n bottom = np.max(kpt[:, 1])\n old_size, center = bbox2point(left, right, top, bottom, type='kpt68')\n else:\n # bbox, bbox_type, landmarks = self.face_detector.run(image)\n bbox, bbox_type = self.face_detector.run(image)\n if len(bbox) < 1:\n print('no face detected! 
run original image')\n left = 0\n right = h - 1\n top = 0\n bottom = w - 1\n old_size, center = bbox2point(left, right, top, bottom, type=bbox_type)\n else:\n if self.max_detection is None:\n bbox = bbox[0]\n left = bbox[0]\n right = bbox[2]\n top = bbox[1]\n bottom = bbox[3]\n old_size, center = bbox2point(left, right, top, bottom, type=bbox_type)\n else: \n old_size, center = [], []\n num_det = min(self.max_detection, len(bbox))\n for bbi in range(num_det):\n bb = bbox[0]\n left = bb[0]\n right = bb[2]\n top = bb[1]\n bottom = bb[3]\n osz, c = bbox2point(left, right, top, bottom, type=bbox_type)\n old_size += [osz]\n center += [c]\n \n if isinstance(old_size, list):\n size = []\n src_pts = []\n for i in range(len(old_size)):\n size += [int(old_size[i] * self.scale)]\n src_pts += [np.array(\n [[center[i][0] - size[i] / 2, center[i][1] - size[i] / 2], [center[i][0] - size[i] / 2, center[i][1] + size[i] / 2],\n [center[i][0] + size[i] / 2, center[i][1] - size[i] / 2]])]\n else:\n size = int(old_size * self.scale)\n src_pts = np.array(\n [[center[0] - size / 2, center[1] - size / 2], [center[0] - size / 2, center[1] + size / 2],\n [center[0] + size / 2, center[1] - size / 2]])\n else:\n src_pts = np.array([[0, 0], [0, h - 1], [w - 1, 0]])\n \n image = image / 255.\n if not isinstance(src_pts, list):\n DST_PTS = np.array([[0, 0], [0, self.resolution_inp - 1], [self.resolution_inp - 1, 0]])\n tform = estimate_transform('similarity', src_pts, DST_PTS)\n dst_image = warp(image, tform.inverse, output_shape=(self.resolution_inp, self.resolution_inp), order=3)\n dst_image = dst_image.transpose(2, 0, 1)\n return {'image': torch.tensor(dst_image).float(),\n 'image_name': imagename,\n 'image_path': imagepath,\n # 'tform': tform,\n # 'original_image': torch.tensor(image.transpose(2,0,1)).float(),\n }\n else:\n DST_PTS = np.array([[0, 0], [0, self.resolution_inp - 1], [self.resolution_inp - 1, 0]])\n dst_images = []\n for i in range(len(src_pts)):\n tform = estimate_transform('similarity', src_pts[i], DST_PTS)\n dst_image = warp(image, tform.inverse, output_shape=(self.resolution_inp, self.resolution_inp), order=3)\n dst_image = dst_image.transpose(2, 0, 1)\n dst_images += [dst_image]\n dst_images = np.stack(dst_images, axis=0)\n \n imagenames = [imagename + f\"{j:02d}\" for j in range(dst_images.shape[0])]\n imagepaths = [imagepath]* dst_images.shape[0]\n return {'image': torch.tensor(dst_images).float(),\n 'image_name': imagenames,\n 'image_path': imagepaths,\n # 'tform': tform,\n # 'original_image': torch.tensor(image.transpose(2,0,1)).float(),\n }" }, { "identifier": "FaceDataModuleBase", "path": "inferno/datasets/FaceDataModuleBase.py", "snippet": "class FaceDataModuleBase(pl.LightningDataModule):\n \"\"\"\n A base data module for face datasets. This DM can be inherited by any face datasets, which just adapt things \n to the dataset's specificities (such as different GT or data storage structure). \n This class can take care of face detection, recognition, segmentation and landmark detection.\n \"\"\"\n\n def __init__(self, root_dir, output_dir, processed_subfolder, device=None,\n face_detector='fan',\n face_detector_threshold=0.9,\n image_size=224,\n scale=1.25,\n bb_center_shift_x=0., # in relative numbers\n bb_center_shift_y=0., # in relative numbers (i.e. 
-0.1 for 10% shift upwards, ...)\n processed_ext=\".png\", \n save_detection_images=True, \n save_landmarks_frame_by_frame=True, # default\n save_landmarks_one_file=False, # only use for large scale video datasets (that would produce too many files otherwise)\n save_segmentation_frame_by_frame=True, # default\n save_segmentation_one_file=False, # only use for large scale video datasets (that would produce too many files otherwise)\n return_mica_images=False,\n ):\n super().__init__()\n self.root_dir = root_dir\n self.output_dir = output_dir\n self.bb_center_shift_x = bb_center_shift_x\n self.bb_center_shift_y = bb_center_shift_y\n self.processed_ext = processed_ext\n self.save_detection_images=save_detection_images\n self.save_landmarks_frame_by_frame = save_landmarks_frame_by_frame\n self.save_landmarks_one_file = save_landmarks_one_file\n assert not (save_landmarks_one_file and save_landmarks_frame_by_frame) # only one of them can be true\n self.save_segmentation_frame_by_frame = save_segmentation_frame_by_frame\n self.save_segmentation_one_file = save_segmentation_one_file\n assert not (save_segmentation_one_file and save_segmentation_frame_by_frame) # only one of them can be true\n\n if processed_subfolder is None:\n import datetime\n date = datetime.datetime.now()\n processed_folder = os.path.join(output_dir, \"processed_%s\" % date.strftime(\"%Y_%b_%d_%H-%M-%S\"))\n else:\n processed_folder = os.path.join(output_dir, processed_subfolder)\n self.output_dir = processed_folder\n\n self.device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n self.face_detector_type = face_detector\n self.face_detector_threshold = face_detector_threshold\n\n self.image_size = image_size\n self.scale = scale\n self.return_mica_images = return_mica_images\n\n def _get_max_faces_per_image(self): \n return 1\n \n def _is_video_dataset(self): \n return False\n\n # @profile\n def _instantiate_detector(self, overwrite = False, face_detector=None):\n face_detector = face_detector or self.face_detector_type\n if hasattr(self, 'face_detector'):\n if not overwrite:\n return\n del self.face_detector\n if self.face_detector_type == 'fan':\n self.face_detector = FAN(self.device, threshold=self.face_detector_threshold, mode='2D')\n elif self.face_detector_type == 'fan3d':\n self.face_detector = FAN(self.device, threshold=self.face_detector_threshold, mode='3D')\n elif self.face_detector_type == 'mtcnn':\n self.face_detector = MTCNN(self.device)\n elif self.face_detector_type == '3fabrec': \n from inferno.utils.TFabRecLandmarkDetector import TFabRec\n self.face_detector = TFabRec(instantiate_detector='sfd', threshold=self.face_detector_threshold)\n elif self.face_detector_type == 'mediapipe': \n from inferno.utils.MediaPipeLandmarkDetector import MediaPipeLandmarkDetector\n self.face_detector = MediaPipeLandmarkDetector(threshold=self.face_detector_threshold, \n video_based=self._is_video_dataset(), max_faces=self._get_max_faces_per_image())\n elif self.face_detector_type == 'deep3dface': \n from inferno.utils.Deep3DFaceLandmarkDetector import Deep3DFaceLandmarkDetector\n self.face_detector = Deep3DFaceLandmarkDetector(instantiate_detector='mtcnn')\n else:\n raise ValueError(\"Invalid face detector specifier '%s'\" % self.face_detector)\n\n # @profile\n def _detect_faces_in_image(self, image_or_path, detected_faces=None):\n # imagepath = self.imagepath_list[index]\n # imagename = imagepath.split('/')[-1].split('.')[0]\n if isinstance(image_or_path, (str, Path)):\n image = 
np.array(imread(image_or_path))\n elif isinstance(image_or_path, np.ndarray):\n image = image_or_path\n else: \n raise ValueError(\"Invalid image type '%s'\" % type(image_or_path)) \n \n if len(image.shape) == 2:\n image = np.tile(image[:, :, None], (1, 1, 3))\n if len(image.shape) == 3 and image.shape[2] > 3:\n image = image[:, :, :3]\n\n h, w, _ = image.shape\n self._instantiate_detector()\n bounding_boxes, bbox_type, landmarks = self.face_detector.run(image,\n with_landmarks=True,\n detected_faces=detected_faces)\n image = image / 255.\n detection_images = []\n detection_centers = []\n detection_sizes = []\n detection_landmarks = [] # landmarks wrt the detection image\n # original_landmarks = [] # landmarks wrt the original image\n original_landmarks = landmarks # landmarks wrt the original image\n # detection_embeddings = []\n if len(bounding_boxes) == 0:\n # print('no face detected! run original image')\n return detection_images, detection_centers, detection_images, \\\n bbox_type, detection_landmarks, original_landmarks\n # left = 0\n # right = h - 1\n # top = 0\n # bottom = w - 1\n # bounding_boxes += [[left, right, top, bottom]]\n\n for bi, bbox in enumerate(bounding_boxes):\n left = bbox[0]\n right = bbox[2]\n top = bbox[1]\n bottom = bbox[3]\n old_size, center = bbox2point(left, right, top, bottom, type=bbox_type)\n\n center[0] += abs(right-left)*self.bb_center_shift_x\n center[1] += abs(bottom-top)*self.bb_center_shift_y\n\n size = int(old_size * self.scale)\n\n dst_image, dts_landmark = bbpoint_warp(image, center, size, self.image_size, landmarks=landmarks[bi])\n\n # dst_image = dst_image.transpose(2, 0, 1)\n #\n detection_images += [(dst_image*255).astype(np.uint8)]\n detection_centers += [center]\n detection_sizes += [size]\n\n # imsave(os.path.join(\"detection_%d.png\" % bi), dst_image)\n\n # to be checked\n detection_landmarks += [dts_landmark]\n\n del image\n return detection_images, detection_centers, detection_sizes, bbox_type, detection_landmarks, original_landmarks\n\n # @profile\n def _detect_faces_in_image_wrapper(self, frame_list, fid, out_detection_folder, out_landmark_folder, bb_outfile,\n centers_all, sizes_all, detection_fnames_all, landmark_fnames_all, \n out_landmarks_all=None, out_landmarks_orig_all=None, out_bbox_type_all=None):\n\n if isinstance(frame_list, (str, Path, list)):\\\n # if frame list is a list of image paths\n frame_fname = frame_list[fid]\n # detect faces in each frames\n detection_ims, centers, sizes, bbox_type, landmarks, orig_landmarks = self._detect_faces_in_image(Path(self.output_dir) / frame_fname)\n elif isinstance(frame_list, (np.ndarray, types.GeneratorType)): \n # frame_list is an array of many images, or a generator (like a video reader)\n frame_fname =Path(f\"{fid:05d}.png\")\n if isinstance(frame_list, np.ndarray):\n frame = frame_list[fid]\n else: \n frame = next(frame_list)\n detection_ims, centers, sizes, bbox_type, landmarks, orig_landmarks = self._detect_faces_in_image(frame)\n # if len(detection_ims) > 0: # debug visualization\n # imsave(frame_fname, detection_ims[0])\n \n # self.detection_lists[sequence_id][fid] += [detections]\n # import plotly.graph_objects as go\n # fig = go.Figure(data=go.Image(z=frame,))\n # fig.show()\n\n \n centers_all += [centers]\n sizes_all += [sizes]\n if out_landmarks_all is not None:\n out_landmarks_all += [landmarks]\n if out_landmarks_orig_all is not None:\n out_landmarks_orig_all += [orig_landmarks]\n if out_bbox_type_all is not None:\n out_bbox_type_all += [[bbox_type]*len(landmarks)]\n\n # 
save detections\n detection_fnames = []\n landmark_fnames = []\n for di, detection in enumerate(detection_ims):\n # save detection\n stem = frame_fname.stem + \"_%.03d\" % di\n if self.save_detection_images:\n out_detection_fname = out_detection_folder / (stem + self.processed_ext)\n detection_fnames += [out_detection_fname.relative_to(self.output_dir)]\n if self.processed_ext in ['.JPG', '.jpg', \".jpeg\", \".JPEG\"]:\n imsave(out_detection_fname, detection, quality=100)\n else:\n imsave(out_detection_fname, detection)\n # save landmarks\n if self.save_landmarks_frame_by_frame:\n if self.save_detection_images:\n out_landmark_fname = out_landmark_folder / (stem + \".pkl\")\n landmark_fnames += [out_landmark_fname.relative_to(self.output_dir)]\n save_landmark(out_landmark_fname, landmarks[di], bbox_type)\n else: \n out_landmark_fname = out_landmark_folder / (stem + \".pkl\")\n landmark_fnames += [out_landmark_fname.relative_to(self.output_dir)]\n save_landmark(out_landmark_fname, orig_landmarks[di], bbox_type)\n\n detection_fnames_all += [detection_fnames]\n landmark_fnames_all += [landmark_fnames]\n\n torch.cuda.empty_cache()\n checkpoint_frequency = 100\n if fid % checkpoint_frequency == 0:\n FaceDataModuleBase.save_detections(bb_outfile, detection_fnames_all, landmark_fnames_all,\n centers_all, sizes_all, fid)\n\n\n def _get_segmentation_method(self): \n return \"focus\"\n # return \"bisenet\"\n\n\n def _segment_images(self, detection_fnames_or_ims, out_segmentation_folder, path_depth = 0, landmarks=None, segmentation_net=None):\n import time\n # segmentation_net = segmentation_net or \"bisenet\"\n segmentation_net = segmentation_net or self._get_segmentation_method()\n if self.save_landmarks_one_file: \n overwrite = False \n # single_out_file = out_segmentation_folder / \"segmentations.pkl\"\n single_out_file = out_segmentation_folder / \"segmentations.hdf5\"\n if single_out_file.is_file() and not overwrite:\n print(f\"Segmentation already found in {single_out_file}, skipping\")\n return\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print(device)\n net, seg_type, batch_size = self._get_segmentation_net(device, segmentation_net)\n\n # if self.save_detection_images:\n # ref_im = imread(detection_fnames_or_ims[0])\n # else: \n # ref_im = detection_fnames_or_ims[0]\n # ref_size = Resize((ref_im.shape[0], ref_im.shape[1]), interpolation=Image.NEAREST)\n ref_size = None\n\n # transforms = Compose([\n # Resize((512, 512)),\n # Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n # ])\n transforms=None\n # batch_size = 16\n\n if isinstance(detection_fnames_or_ims, types.GeneratorType): \n im_read = \"skvreader\"\n elif isinstance(detection_fnames_or_ims, (FFmpegReader)):\n im_read = \"skvffmpeg\"\n else:\n im_read = 'pil' if not isinstance(detection_fnames_or_ims[0], np.ndarray) else None\n\n dataset = UnsupervisedImageDataset(detection_fnames_or_ims, image_transforms=transforms,\n landmark_list = landmarks,\n im_read=im_read)\n loader = DataLoader(dataset, batch_size=batch_size, num_workers=4 if im_read not in [\"skvreader\", \"skvffmpeg\"] else 1, \n shuffle=False)\n\n # import matplotlib.pyplot as plt\n\n if self.save_segmentation_one_file: \n out_segmentation_names = []\n out_segmentations = []\n out_segmentation_types = []\n\n for i, batch in enumerate(tqdm(loader)):\n # facenet_pytorch expects this stanadrization for the input to the net\n # images = fixed_image_standardization(batch['image'].to(device))\n images = batch['image'].cuda()\n # start = 
time.time()\n with torch.no_grad():\n segmentation = net(images)\n # end = time.time()\n\n if ref_size is None:\n ref_size = Resize((images.shape[2], images.shape[3]), interpolation=Image.NEAREST)\n\n segmentation = ref_size(segmentation)\n segmentation = segmentation.cpu().numpy()\n\n if self.save_segmentation_frame_by_frame:\n start = time.time()\n for j in range(segmentation.shape[0]):\n image_path = batch['path'][j]\n # if isinstance(out_segmentation_folder, list):\n if path_depth > 0:\n rel_path = Path(image_path).parent.relative_to(Path(image_path).parents[path_depth])\n segmentation_path = out_segmentation_folder / rel_path / (Path(image_path).stem + \".pkl\")\n else:\n segmentation_path = out_segmentation_folder / (Path(image_path).stem + \".pkl\")\n segmentation_path.parent.mkdir(exist_ok=True, parents=True)\n # im = images[j]\n # im = im.permute(1,2,0).cpu().numpy()\n # from inferno.datasets.IO import process_segmentation \n # import matplotlib.pyplot as plt\n # from inferno.datasets.FaceVideoDataModule import FaceDataModuleBase\n # seg = process_segmentation(segmentation[j], seg_type)\n # imsave(\"seg.png\", seg)\n # imsave(\"im.png\", im)\n # FaceDataModuleBase.vis_parsing_maps(im, segmentation[j], stride=1, save_im=True,\n # save_path='overlay.png')\n # plt.figure()\n # plt.imshow(im)\n # plt.show()\n # plt.figure()\n # plt.imshow(seg[0])\n # plt.show()\n save_segmentation(segmentation_path, segmentation[j], seg_type)\n print(f\" Saving batch {i} took: {end - start}\")\n end = time.time()\n if self.save_segmentation_one_file: \n segmentation_names = []\n segmentations = []\n for j in range(segmentation.shape[0]):\n image_path = batch['path'][j]\n if path_depth > 0:\n rel_path = Path(image_path).parent.relative_to(Path(image_path).parents[path_depth])\n segmentation_path = rel_path / (Path(image_path).stem + \".pkl\")\n else:\n segmentation_path = Path(image_path).stem \n segmentation_names += [segmentation_path]\n segmentations += [segmentation[j]]\n out_segmentation_names += segmentation_names\n out_segmentations += segmentations\n out_segmentation_types += [seg_type] * len(segmentation_names)\n\n if self.save_landmarks_one_file: \n if single_out_file.suffix == \".pkl\":\n save_segmentation_list(single_out_file, out_segmentations, out_segmentation_types, out_segmentation_names)\n elif single_out_file.suffix == \".hdf5\":\n save_segmentation_list_v2(single_out_file, out_segmentations, out_segmentation_types, out_segmentation_names)\n print(\"Segmentation saved to %s\" % single_out_file)\n\n\n def _get_segmentation_net(self, device, method='bisenet'):\n if method == 'bisenet':\n seg_type = 'face_parsing'\n if hasattr(self, \"_bisenet\" ): \n net = self._bisenet\n else:\n from inferno.models.external.BiSeNetFaceParsing import BiSeNetFaceParsing\n net = BiSeNetFaceParsing()\n self._bisenet = net\n batch_size = 64\n elif method == \"gpen\": \n seg_type = 'face_parsing_gpen'\n if hasattr(self, \"_gpen\" ): \n net = self._gpen\n else:\n from inferno.models.external.GPENFaceParsing import GPENFaceParsing\n net = GPENFaceParsing()\n self._gpen = net\n batch_size = 16\n elif method == \"focus\": \n seg_type = 'face_segmentation_focus'\n if hasattr(self, \"_focus\" ): \n net = self._focus\n else:\n from inferno.models.external.FocusSegmentation import FocusSegmentation\n net = FocusSegmentation()\n self._focus = net\n batch_size = 16\n # batch_size = 16\n else: \n raise ValueError(f\"Unknown segmentation type: {method}\" )\n\n # from inferno.utils.other import get_path_to_externals\n # 
path_to_segnet = get_path_to_externals() / \"face-parsing.PyTorch\"\n # if not(str(path_to_segnet) in sys.path or str(path_to_segnet.absolute()) in sys.path):\n # sys.path += [str(path_to_segnet)]\n\n # from model import BiSeNet\n # n_classes = 19\n # net = BiSeNet(n_classes=n_classes)\n # # net.cuda()\n # save_pth = path_to_segnet / 'res' / 'cp' / '79999_iter.pth'\n # net.load_state_dict(torch.load(save_pth))\n # # net.eval()\n # net.eval().to(device)\n\n # labels = {\n # 0: 'background',\n # 1: 'skin',\n # 2: 'nose',\n # 3: 'eye_g',\n # 4: 'l_eye',\n # 5: 'r_eye',\n # 6: 'l_brow',\n # 7: 'r_brow',\n # 8: 'l_ear',\n # 9: 'r_ear',\n # 10: 'mouth',\n # 11: 'u_lip',\n # 12: 'l_lip',\n # 13: 'hair',\n # 14: 'hat',\n # 15: 'ear_r',\n # 16: 'neck_l',\n # 17: 'neck',\n # 18: 'cloth'\n # }\n\n return net, seg_type , batch_size\n\n\n @staticmethod\n def save_landmark_list(fname, landmarks):\n with open(fname, \"wb\" ) as f:\n pkl.dump(landmarks, f)\n\n @staticmethod\n def load_landmark_list(fname):\n with open(fname, \"rb\" ) as f:\n landmarks = pkl.load(f)\n return landmarks\n\n\n @staticmethod\n def save_landmark_list_v2(fname, landmarks, landmark_confidences, landmark_types):\n with open(fname, \"wb\" ) as f:\n pkl.dump(landmarks, f)\n pkl.dump(landmark_confidences, f)\n pkl.dump(landmark_types, f)\n\n @staticmethod\n def load_landmark_list_v2(fname):\n with open(fname, \"rb\" ) as f:\n landmarks = pkl.load(f)\n landmark_confidences = pkl.load(f)\n landmark_types = pkl.load(f)\n return landmarks, landmark_confidences, landmark_types\n\n\n @staticmethod\n def save_detections(fname, detection_fnames, landmark_fnames, centers, sizes, last_frame_id):\n with open(fname, \"wb\" ) as f:\n pkl.dump(detection_fnames, f)\n pkl.dump(centers, f)\n pkl.dump(sizes, f)\n pkl.dump(last_frame_id, f)\n pkl.dump(landmark_fnames, f)\n\n @staticmethod\n def load_detections(fname):\n with open(fname, \"rb\" ) as f:\n detection_fnames = pkl.load(f)\n centers = pkl.load(f)\n sizes = pkl.load(f)\n try:\n last_frame_id = pkl.load(f)\n except:\n last_frame_id = -1\n try:\n landmark_fnames = pkl.load(f)\n except:\n landmark_fnames = [None]*len(detection_fnames)\n\n return detection_fnames, landmark_fnames, centers, sizes, last_frame_id" }, { "identifier": "point2bbox", "path": "inferno/datasets/ImageDatasetHelpers.py", "snippet": "def point2bbox(center, size):\n size2 = size / 2\n\n src_pts = np.array(\n [[center[0] - size2, center[1] - size2], [center[0] - size2, center[1] + size2],\n [center[0] + size2, center[1] - size2]])\n return src_pts" }, { "identifier": "bbpoint_warp", "path": "inferno/datasets/ImageDatasetHelpers.py", "snippet": "def bbpoint_warp(image, center, size, target_size_height, target_size_width=None, output_shape=None, inv=True, landmarks=None, \n order=3 # order of interpolation, bicubic by default\n ):\n target_size_width = target_size_width or target_size_height\n tform = point2transform(center, size, target_size_height, target_size_width)\n tf = tform.inverse if inv else tform\n output_shape = output_shape or (target_size_height, target_size_width)\n dst_image = warp(image, tf, output_shape=output_shape, order=order)\n if landmarks is None:\n return dst_image\n # points need the matrix\n if isinstance(landmarks, np.ndarray):\n assert isinstance(landmarks, np.ndarray)\n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = tf_lmk(landmarks[:, :2])\n elif isinstance(landmarks, list): \n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = [] \n for i in range(len(landmarks)):\n 
dst_landmarks += [tf_lmk(landmarks[i][:, :2])]\n elif isinstance(landmarks, dict): \n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = {}\n for key, value in landmarks.items():\n dst_landmarks[key] = tf_lmk(landmarks[key][:, :2])\n else: \n raise ValueError(\"landmarks must be np.ndarray, list or dict\")\n return dst_image, dst_landmarks" }, { "identifier": "UnsupervisedImageDataset", "path": "inferno/datasets/UnsupervisedImageDataset.py", "snippet": "class UnsupervisedImageDataset(torch.utils.data.Dataset):\n\n def __init__(self, image_list, landmark_list=None, image_transforms=None, im_read=None, \n align_landmarks=False):\n super().__init__()\n self.image_list = image_list\n self.landmark_list = landmark_list\n if landmark_list is not None and len(landmark_list) != len(image_list):\n raise RuntimeError(\"There must be a landmark for every image\")\n self.image_transforms = image_transforms\n self.im_read = im_read or 'skio'\n if self.im_read in ['skvreader', 'skvffmpeg']:\n self.ordered = True\n self.last_index = -1\n else: \n self.ordered = False\n if self.im_read == 'skvffmpeg':\n self.next_frame_it = self.image_list.nextFrame()\n\n if isinstance(self.image_list, np.ndarray): \n self.im_read = None\n\n def __getitem__(self, index):\n if self.ordered: \n if index != self.last_index + 1:\n raise RuntimeError(\"The images must be read in order because of the skvideo reader\")\n self.last_index = index\n # if index < len(self.image_list):\n # x = self.mnist_data[index]\n # raise IndexError(\"Out of bounds\")\n try:\n if isinstance(self.image_list, np.ndarray):\n img = self.image_list[index].transpose([2, 0, 1]).astype(np.float32)\n img_torch = torch.from_numpy(img)\n path = f\"{index:05d}\"\n elif self.im_read == 'skio':\n img = imread(self.image_list[index])\n img = img.transpose([2, 0, 1]).astype(np.float32)\n img_torch = torch.from_numpy(img)\n path = str(self.image_list[index])\n elif self.im_read == 'pil':\n img = Image.open(self.image_list[index])\n img_torch = ToTensor()(img)\n path = str(self.image_list[index])\n # path = f\"{index:05d}\"\n elif self.im_read == 'skvreader':\n img = next(self.image_list)\n img = img.transpose([2, 0, 1]).astype(np.float32)\n img_torch = torch.from_numpy(img) / 255.\n path = f\"{index:05d}\"\n elif self.im_read == 'skvffmpeg':\n img = next(self.next_frame_it)\n img = img.transpose([2, 0, 1]).astype(np.float32)\n img_torch = torch.from_numpy(img) / 255.\n path = f\"{index:05d}\"\n else:\n raise ValueError(f\"Invalid image reading method {self.im_read}\")\n except Exception as e:\n print(f\"Failed to read '{self.image_list[index]}'. File is probably corrupted. 
Rerun data processing\")\n raise e\n\n if self.image_transforms is not None:\n img_torch = self.image_transforms(img_torch)\n\n batch = {\"image\" : img_torch,\n \"path\" : path}\n\n if self.landmark_list is not None:\n landmark_type, landmark = load_landmark(self.landmark_list[index])\n landmark_torch = torch.from_numpy(landmark)\n\n if self.image_transforms is not None:\n landmark_torch = self.image_transforms(landmark_torch)\n\n batch[\"landmark\"] = landmark_torch\n\n return batch\n\n def __len__(self):\n if self.im_read in ['skvreader', 'skvffmpeg']:\n return self.image_list.getShape()[0]\n return len(self.image_list)" }, { "identifier": "save_emotion", "path": "inferno/datasets/IO.py", "snippet": "def save_emotion(filename, emotion_features, emotion_type, version=0):\n with open(filename, \"wb\") as f:\n # for some reason compressed pickle can only load one object (EOF bug)\n # so put it in the list\n cpkl.dump([version, emotion_type, emotion_features], f, compression='gzip')" }, { "identifier": "save_segmentation_list", "path": "inferno/datasets/IO.py", "snippet": "def save_segmentation_list(filename, seg_images, seg_types, seg_names):\n with open(filename, \"wb\") as f:\n # for some reason compressed pickle can only load one object (EOF bug)\n # so put it in the list\n cpkl.dump([seg_types, seg_images, seg_names], f, compression='gzip')\n # pkl.dump(seg_type, f)\n # pkl.dump(seg_image, f)" }, { "identifier": "save_reconstruction_list", "path": "inferno/datasets/IO.py", "snippet": "def save_reconstruction_list(filename, reconstructions):\n _save_hickle_file(reconstructions, filename)\n # hkl.dump(reconstructions, filename)" }, { "identifier": "save_reconstruction_list_v2", "path": "inferno/datasets/IO.py", "snippet": "def save_reconstruction_list_v2(filename, reconstructions, overwrite=False):\n _save_hdf5_dict(reconstructions, filename)" }, { "identifier": "save_emotion_list", "path": "inferno/datasets/IO.py", "snippet": "def save_emotion_list(filename, emotions):\n _save_hickle_file(emotions, filename)\n # hkl.dump(emotions, filename)" }, { "identifier": "save_emotion_list_v2", "path": "inferno/datasets/IO.py", "snippet": "def save_emotion_list_v2(filename, emotions, overwrite=False):\n _save_hdf5_dict(emotions, filename, overwrite)" }, { "identifier": "load_reconstruction_list_v2", "path": "inferno/datasets/IO.py", "snippet": "def load_reconstruction_list_v2(filename, start_frame=None, end_frame=None):\n return _load_hdf5_dict(filename, start_frame, end_frame)" }, { "identifier": "get_path_to_assets", "path": "inferno/utils/other.py", "snippet": "def get_path_to_assets() -> Path:\n import inferno\n return Path(inferno.__file__).parents[1] / \"assets\"" }, { "identifier": "dict_to_device", "path": "inferno/utils/batch.py", "snippet": "def dict_to_device(d, device): \n for k, v in d.items():\n if isinstance(v, torch.Tensor):\n d[k] = v.to(device)\n elif isinstance(v, dict):\n d[k] = dict_to_device(v, device)\n else: \n pass\n return d" }, { "identifier": "VideoFaceDetectionDataset", "path": "inferno/datasets/VideoFaceDetectionDataset.py", "snippet": "class VideoFaceDetectionDataset(torch.utils.data.Dataset):\n\n def __init__(self, video_name, landmark_path, image_transforms=None, \n align_landmarks=False, vid_read=None, output_im_range=None, \n scale_adjustment=1.25,\n target_size_height=256, \n target_size_width=256,\n ):\n super().__init__()\n self.video_name = video_name\n self.landmark_path = landmark_path / \"landmarks_original.pkl\"\n # if landmark_list is not None and 
len(lanmark_file_name) != len(image_list):\n # raise RuntimeError(\"There must be a landmark for every image\")\n self.image_transforms = image_transforms\n self.vid_read = vid_read or 'skvreader' # 'skvread'\n self.prev_index = -1\n\n self.scale_adjustment=scale_adjustment\n self.target_size_height=target_size_height\n self.target_size_width=target_size_width\n\n self.video_frames = None \n if self.vid_read == \"skvread\": \n self.video_frames = vread(str(self.video_name))\n elif self.vid_read == \"skvreader\": \n self.video_frames = vreader(str(self.video_name))\n\n with open(self.landmark_path, \"rb\") as f: \n self.landmark_list = pkl.load(f)\n\n with open(landmark_path / \"landmark_types.pkl\", \"rb\") as f: \n self.landmark_types = pkl.load(f)\n \n self.total_len = 0 \n self.frame_map = {} # detection index to frame map\n self.index_for_frame_map = {} # detection index to frame map\n for i in range(len(self.landmark_list)): \n for j in range(len(self.landmark_list[i])): \n self.frame_map[self.total_len + j] = i\n self.index_for_frame_map[self.total_len + j] = j\n self.total_len += len(self.landmark_list[i])\n\n self.output_im_range = output_im_range\n self.next_frame_idx = 0 \n self.previous_frame = None\n\n\n def __getitem__(self, index):\n # if index < len(self.image_list):\n # x = self.mnist_data[index]\n # raise IndexError(\"Out of bounds\")\n if index != self.prev_index+1 and self.vid_read != 'skvread': \n raise RuntimeError(\"This dataset is meant to be accessed in ordered way only (and with 0 or 1 workers)\")\n\n frame_index = self.frame_map[index]\n detection_in_frame_index = self.index_for_frame_map[index]\n landmark = self.landmark_list[frame_index][detection_in_frame_index]\n landmark_type = self.landmark_types[frame_index][detection_in_frame_index]\n\n if isinstance(self.video_frames, np.ndarray): \n img = self.video_frames[frame_index, ...]\n elif isinstance(self.video_frames, GeneratorType):\n img = next(self.video_frames)\n\n ## make sure the next frame to be read and the current frame are the same \n if frame_index > self.next_frame_idx: \n ## this can happen if a bunch of frames had no detections\n while True:\n self.next_frame_idx +=1 \n img = next(self.video_frames)\n if self.next_frame_idx == frame_index:\n break\n if self.next_frame_idx == detection_in_frame_index: \n self.video_frames = vreader(str(self.video_name))\n self.next_frame_idx = frame_index \n img = next(self.video_frames)\n self.previous_frame = img.copy()\n self.next_frame_idx += 1\n elif self.next_frame_idx -1 == frame_index:\n assert self.previous_frame is not None, \"No previous frame has been read yet\"\n else: \n raise RuntimeError(\"This dataset is meant to be accessed in ordered way only (and with 0 or 1 workers)\")\n else: \n raise NotImplementedError() \n\n # try:\n # if self.vid_read == 'skvread':\n # img = vread(self.image_list[index])\n # img = img.transpose([2, 0, 1]).astype(np.float32)\n # img_torch = torch.from_numpy(img)\n # path = str(self.image_list[index])\n # elif self.vid_read == 'pil':\n # img = Image.open(self.image_list[index])\n # img_torch = ToTensor()(img)\n # path = str(self.image_list[index])\n # # path = f\"{index:05d}\"\n # else:\n # raise ValueError(f\"Invalid image reading method {self.im_read}\")\n # except Exception as e:\n # print(f\"Failed to read '{self.image_list[index]}'. File is probably corrupted. 
Rerun data processing\")\n # raise e\n\n # crop out the face\n img = align_face(img, landmark, landmark_type, scale_adjustment=1.25, target_size_height=256, target_size_width=256,)\n if self.output_im_range == 255: \n img = img * 255.0\n img = img.astype(np.float32)\n img_torch = ToTensor()(img)\n\n # # plot img with pyplot \n # import matplotlib.pyplot as plt\n # plt.figure()\n # plt.imshow(img)\n # plt.show()\n # # plot image with plotly\n # import plotly.graph_objects as go\n # fig = go.Figure(data=go.Image(z=img*255.,))\n # fig.show()\n\n\n if self.image_transforms is not None:\n img_torch = self.image_transforms(img_torch)\n\n batch = {\"image\" : img_torch,\n # \"path\" : path\n }\n\n self.prev_index += 1\n return batch\n\n def __len__(self):\n return self.total_len" }, { "identifier": "save_landmark", "path": "inferno/utils/FaceDetector.py", "snippet": "def save_landmark(fname, landmark, landmark_type):\n with open(fname, \"wb\") as f:\n pkl.dump(landmark_type, f)\n pkl.dump(landmark, f)" }, { "identifier": "save_landmark_v2", "path": "inferno/utils/FaceDetector.py", "snippet": "def save_landmark_v2(fname, landmark, landmark_confidence, landmark_type):\n with open(fname, \"wb\") as f:\n pkl.dump(landmark_type, f)\n pkl.dump(landmark_confidence, f)\n pkl.dump(landmark, f)" } ]
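The point2bbox and bbpoint_warp snippets above (and the estimate_transform/warp calls inside the dataset classes) all share one cropping idea: three corners of a square around the detected face are mapped onto the target resolution with a similarity transform. A minimal standalone sketch of that step, with center, size and image as placeholder inputs rather than values taken from this record:

import numpy as np
from skimage.transform import estimate_transform, warp

def crop_face(image, center, size, resolution=224):
    # three corner points of the source square, same convention as point2bbox
    s2 = size / 2
    src_pts = np.array([[center[0] - s2, center[1] - s2],
                        [center[0] - s2, center[1] + s2],
                        [center[0] + s2, center[1] - s2]])
    dst_pts = np.array([[0, 0], [0, resolution - 1], [resolution - 1, 0]])
    tform = estimate_transform('similarity', src_pts, dst_pts)
    # warp needs the output-to-input mapping, hence tform.inverse
    return warp(image / 255., tform.inverse, output_shape=(resolution, resolution), order=3)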
from torch.utils.data.dataloader import DataLoader from pathlib import Path from typing import Optional, Union, List from tqdm import tqdm, auto from torchvision.transforms import Resize, Compose from inferno.datasets.ImageTestDataset import TestData from inferno.datasets.FaceDataModuleBase import FaceDataModuleBase from inferno.datasets.ImageDatasetHelpers import point2bbox, bbpoint_warp from inferno.datasets.UnsupervisedImageDataset import UnsupervisedImageDataset from facenet_pytorch import InceptionResnetV1 from collections import OrderedDict from inferno.datasets.IO import (save_emotion, save_segmentation_list, save_reconstruction_list, save_reconstruction_list_v2, save_emotion_list, save_emotion_list_v2, load_reconstruction_list_v2 ) from PIL import Image, ImageDraw, ImageFont from inferno.utils.other import get_path_to_assets from skimage.io import imread from skvideo.io import vreader, vread from inferno.utils.batch import dict_to_device from inferno.datasets.VideoFaceDetectionDataset import VideoFaceDetectionDataset from inferno.utils.FaceDetector import save_landmark, save_landmark_v2 from inferno.layers.losses.EmonetLoader import get_emonet from inferno.datasets.IO import load_segmentation_list, save_segmentation_list_v2 from inferno.utils.other import get_path_to_externals from emonet.models import EmoNet from torchvision.transforms import Resize from inferno.datasets.VideoFaceDetectionDataset import VideoFaceDetectionDataset from inferno.models.temporal.Preprocessors import EmotionRecognitionPreprocessor from munch import Munch from inferno.models.temporal.Preprocessors import EmotionRecognitionPreprocessor from munch import Munch from inferno.models.temporal.Preprocessors import FaceRecPreprocessor from munch import Munch from inferno.models.temporal.Preprocessors import EmocaPreprocessor from munch import Munch from inferno.models.temporal.Preprocessors import EmocaPreprocessor from munch import Munch from inferno.models.temporal.external.SpectrePreprocessor import SpectrePreprocessor from munch import Munch from decalib.deca import DECA from decalib.utils.config import cfg as deca_cfg from inferno.models.IO import get_checkpoint_with_kwargs from omegaconf import OmegaConf from inferno.models.DECA import instantiate_deca from inferno_apps.EMOCA.utils.load import load_model from inferno.models.external.Deep3DFace import Deep3DFaceModule from omegaconf import DictConfig from scipy.io.matlab import savemat from skvideo.io import vread from inferno.models.DecaFLAME import FLAME_mediapipe from inferno.utils.PyRenderMeshSequenceRenderer import PyRenderMeshSequenceRenderer from inferno.models.Renderer import SRenderY from inferno.utils.video import combine_video_audio, concatenate_videos from PIL import Image, ImageDraw from PIL import Image, ImageDraw, ImageFont from collections import Counter from matplotlib.pyplot import get_cmap from collections import Counter from matplotlib.pyplot import get_cmap from collections import Counter, OrderedDict from collections import Counter, OrderedDict from sklearn.cluster import DBSCAN from scipy.interpolate import griddata, RBFInterpolator from inferno.datasets.FaceAlignmentTools import align_video, align_and_save_video from skimage.transform import resize from inferno.utils.MediaPipeLandmarkDetector import np2mediapipe from inferno.utils.DecaUtils import tensor_vis_landmarks from inferno.utils.video import save_video, concatenate_videos, save_video_with_audio from skvideo.io import vread from inferno.datasets.IO import load_emotion_list, 
load_segmentation_list, process_segmentation import os, sys import subprocess import numpy as np import torch import pickle as pkl import hickle as hkl import inferno import cv2 import skvideo.io import torch.nn.functional as F import types import time import inspect import inferno.utils.DecaUtils as util import datetime import datetime import inferno.utils.DecaUtils as util import wandb import ffmpeg import scipy import mediapipe as mp
14781
# @profile def _detect_landmarkes_in_aligned_sequence(self, sequence_id): video_file = self._get_path_to_aligned_videos(sequence_id) print("Detecting landmarks in aligned sequence: '%s'" % video_file) out_landmark_folder = self._get_path_to_sequence_landmarks(sequence_id, use_aligned_videos=True) out_landmark_folder.mkdir(exist_ok=True, parents=True) if self.save_landmarks_one_file: overwrite = False if not overwrite and (out_landmark_folder / "landmarks.pkl").is_file() and (out_landmark_folder / "landmarks_original.pkl").is_file() and (out_landmark_folder / "landmark_types.pkl").is_file(): print("Files with landmarks already found in '%s'. Skipping" % out_landmark_folder) return # start_fid = 0 if self.unpack_videos: raise NotImplementedError("Not implemented and should not be. Unpacking videos into a sequence of images is pricy.") # frame_list = self.frame_lists[sequence_id] # fid = 0 # if len(frame_list) == 0: # print("Nothing to detect in: '%s'. All frames have been processed" % self.video_list[sequence_id]) # for fid, frame_fname in enumerate(tqdm(range(start_fid, len(frame_list)))): # # if fid % detector_instantion_frequency == 0: # # self._instantiate_detector(overwrite=True) # self._detect_faces_in_image_wrapper(frame_list, fid, out_detection_folder, out_landmark_folder, out_file_boxes, # centers_all, sizes_all, detection_fnames_all, landmark_fnames_all) else: # if start_fid == 0: # videogen = vreader(str(video_name)) videogen = skvideo.io.FFmpegReader(str(video_file)) # videogen = vread(str(video_name)) # for i in range(start_fid): # _discarded_frame = next(videogen) # else: # videogen = vread(str(video_name)) self._detect_landmarks_no_face_detection(videogen, out_landmark_folder) def _detect_landmarks_no_face_detection(self, detection_fnames_or_ims, out_landmark_folder, path_depth = 0): """ Just detects landmarks without face detection. The images should already be cropped to the face. 
""" if self.save_landmarks_one_file: overwrite = False single_out_file = out_landmark_folder / "landmarks.pkl" if single_out_file.is_file() and not overwrite: print(f"Landmarks already found in {single_out_file}, skipping") return device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print(device) # net, landmark_type, batch_size = self._get_segmentation_net(device) # if self.save_detection_images: # ref_im = imread(detection_fnames_or_ims[0]) # else: # ref_im = detection_fnames_or_ims[0] # ref_size = Resize((ref_im.shape[0], ref_im.shape[1]), interpolation=Image.NEAREST) # ref_size = None optimal_landmark_detector_size = self.face_detector.optimal_landmark_detector_im_size() transforms = Compose([ Resize((optimal_landmark_detector_size, optimal_landmark_detector_size)), # Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ]) # transforms=None batch_size = 64 if isinstance(detection_fnames_or_ims, types.GeneratorType): im_read = "skvreader" elif isinstance(detection_fnames_or_ims, (skvideo.io.FFmpegReader)): im_read = "skvffmpeg" else: im_read = 'pil' if not isinstance(detection_fnames_or_ims[0], np.ndarray) else None dataset = UnsupervisedImageDataset(detection_fnames_or_ims, image_transforms=transforms, im_read=im_read) num_workers = 4 if im_read not in ["skvreader", "skvffmpeg"] else 1 # videos can only be read on 1 thread frame by frame loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False) # import matplotlib.pyplot as plt if self.save_landmarks_one_file: # out_landmark_names = [] out_landmarks = [] out_landmark_types = [] out_landmarks_scores = [] for i, batch in enumerate(tqdm(loader)): # facenet_pytorch expects this stanadrization for the input to the net # images = fixed_image_standardization(batch['image'].to(device)) images = batch['image'].cuda() # start = time.time() with torch.no_grad(): landmarks, landmark_scores = self.face_detector.landmarks_from_batch_no_face_detection(images) # end = time.time() # import matplotlib.pyplot as plt # plt.imshow(images[0].cpu().numpy().transpose(1,2,0)) # # plot the landmark points # plt.scatter(landmarks[0, :, 0] * images.shape[3], landmarks[0, :, 1] * images.shape[2], s=10, marker='.', c='r') # plt.show() if self.save_landmarks_frame_by_frame: start = time.time() for j in range(landmarks.shape[0]): image_path = batch['path'][j] # if isinstance(out_segmentation_folder, list): if path_depth > 0: rel_path = Path(image_path).parent.relative_to(Path(image_path).parents[path_depth]) landmark_path = out_landmark_folder / rel_path / (Path(image_path).stem + ".pkl") else: landmark_path = out_landmark_folder / (Path(image_path).stem + ".pkl") landmark_path.parent.mkdir(exist_ok=True, parents=True)
""" Author: Radek Danecek Copyright (c) 2022, Radek Danecek All rights reserved. # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # Using this computer program means that you agree to the terms # in the LICENSE file included with this software distribution. # Any use not explicitly granted by the LICENSE is prohibited. # # Copyright©2022 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # For comments or questions, please email us at [email protected] # For commercial licensing contact, please contact [email protected] """ # import torchaudio # from collections import OrderedDict # import subprocess # from memory_profiler import profile def add_pretrained_deca_to_path(): deca_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'DECA')) if deca_path not in sys.path: sys.path.insert(0, deca_path) class FaceVideoDataModule(FaceDataModuleBase): """ Base data module for face video datasets. Contains the functionality to unpack the videos, detect faces, segment faces, ... """ def __init__(self, root_dir, output_dir, processed_subfolder=None, face_detector='fan', face_detector_threshold=0.9, image_size=224, scale=1.25, processed_video_size=256, device=None, unpack_videos=True, save_detection_images=True, save_landmarks=True, save_landmarks_one_file=False, save_segmentation_frame_by_frame=True, save_segmentation_one_file=False, bb_center_shift_x=0, # in relative numbers bb_center_shift_y=0, # in relative numbers (i.e. -0.1 for 10% shift upwards, ...) include_processed_audio = True, include_raw_audio = True, preload_videos = False, inflate_by_video_size = False, read_video=True, read_audio=True, align_images=True, return_mica_images = False, ): super().__init__(root_dir, output_dir, processed_subfolder=processed_subfolder, face_detector=face_detector, face_detector_threshold=face_detector_threshold, image_size = image_size, scale = scale, device=device, save_detection_images=save_detection_images, save_landmarks_frame_by_frame=save_landmarks, save_landmarks_one_file=save_landmarks_one_file, save_segmentation_frame_by_frame=save_segmentation_frame_by_frame, # default save_segmentation_one_file=save_segmentation_one_file, # only use for large scale video datasets (that would produce too many files otherwise) bb_center_shift_x=bb_center_shift_x, # in relative numbers bb_center_shift_y=bb_center_shift_y, # in relative numbers (i.e. -0.1 for 10% shift upwards, ...) 
return_mica_images = return_mica_images, ) self.unpack_videos = unpack_videos self.detect_landmarks_on_restored_images = None self.processed_video_size = processed_video_size # self._instantiate_detector() # self.face_recognition = InceptionResnetV1(pretrained='vggface2').eval().to(device) # self.version = 2 self.version = 3 self.video_list = None self.video_metas = None self.audio_metas = None self.annotation_list = None self.frame_lists = None self.loaded = False # self.detection_lists = None self.detection_fnames = [] self.detection_centers = [] self.detection_sizes = [] self.include_processed_audio = include_processed_audio self.include_raw_audio = include_raw_audio self.preload_videos = preload_videos self.inflate_by_video_size = inflate_by_video_size self._must_include_audio = False self.read_video=read_video self.read_audio=read_audio self.align_images = align_images # will align the images when data loading (use if videos not already aligned) @property def metadata_path(self): return os.path.join(self.output_dir, "metadata.pkl") def prepare_data(self, *args, **kwargs): outdir = Path(self.output_dir) # is dataset already processed? # if outdir.is_dir(): if Path(self.metadata_path).is_file(): print("The dataset is already processed. Loading") self._loadMeta() return # else: self._gather_data() self._unpack_videos() self._saveMeta() def _is_video_dataset(self): return True def _unpack_videos(self): self.frame_lists = [] for vi, video_file in enumerate(tqdm(self.video_list)): self._unpack_video(vi) def get_frame_number_format(self): return "%06d" def count_num_frames(self): num_frames = 0 for i in range(len(self.video_metas)): num_frames += self.video_metas[i]['num_frames'] return num_frames # def _get_unpacked_video_subfolder(self, video_idx): # return Path(self._video_category(video_idx)) / video_file.parts[-3] /self._video_set(video_idx) / video_file.stem def _unpack_video(self, video_idx, overwrite=False): video_file = Path(self.root_dir) / self.video_list[video_idx] # suffix = self._get_unpacked_video_subfolder(video_idx) # out_folder = Path(self.output_dir) / suffix out_folder = self._get_path_to_sequence_frames(video_idx) if not out_folder.exists() or overwrite: print("Unpacking video to '%s'" % str(out_folder)) out_folder.mkdir(exist_ok=True, parents=True) out_format = out_folder / (self.get_frame_number_format() + ".png") out_format = '-r 1 -i %s -r 1 ' % str(video_file) + ' "' + str(out_format) + '"' # out_format = ' -r 1 -i %s ' % str(video_file) + ' "' + "$frame.%03d.png" + '"' # subprocess.call(['ffmpeg', out_format]) # os.system("ffmpeg " + out_format) args = ['ffmpeg', '-r', '1', '-i', str(video_file), '-r', '1', str(out_format)] p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if p.returncode != 0: raise Exception('ffprobe', out, err) # import ffmpeg # stream = ffmpeg.input(str(video_file)) # # stream = ffmpeg.output(stream.video, str(out_format)) # stream = ffmpeg.output(stream.video, "%06.png") # stream.run() frame_list = sorted(list(out_folder.glob("*.png"))) frame_list = [path.relative_to(self.output_dir) for path in frame_list] self.frame_lists += [frame_list] n_frames = len(frame_list) expected_frames = int(self.video_metas[video_idx]['num_frames']) if n_frames == expected_frames: pass # print("Successfully unpacked the video into %d frames" % expected_frames) else: print("[WARNING] Expected %d frames but got %d vor video '%s'" % (expected_frames, n_frames, str(video_file))) def _extract_audio(self): # extract 
audio for all videos print("Extracting audio for all videos") for vi, video_file in enumerate(auto.tqdm(self.video_list)): self._extract_audio_for_video(vi) print("Audio extracted for all videos") def _extract_audio_for_video(self, video_idx): video_file = Path(self.root_dir) / self.video_list[video_idx] audio_file = self._get_path_to_sequence_audio(video_idx) # extract the audio from the video using ffmpeg if not audio_file.is_file(): # print("Extracting audio from video '%s'" % str(video_file)) audio_file.parent.mkdir(exist_ok=True, parents=True) cmd = "ffmpeg -i " + str(video_file) + " -f wav -vn -y " + str(audio_file) + ' -loglevel quiet' os.system(cmd) else: print("Skipped extracting audio from video '%s' because it already exists" % str(video_file)) def _detect_faces(self): #, videos_unpacked=True): #, save_detection_images=True, save_landmarks=True): for sid in range(self.num_sequences): self._detect_faces_in_sequence(sid) def _get_path_to_sequence_files(self, sequence_id, file_type, method="", suffix="", assert_=True): if assert_: assert file_type in ['videos', 'detections', "landmarks", "segmentations", "emotions", "reconstructions", "rec_videos"] video_file = self.video_list[sequence_id] if len(method) > 0: if Path(method).is_absolute(): file_type += "_" + Path(method).name else: file_type += "_" + method if len(suffix) > 0: file_type += suffix suffix = Path(self._video_category(sequence_id)) / file_type /self._video_set(sequence_id) / video_file.stem out_folder = Path(self.output_dir) / suffix return out_folder def _get_path_to_sequence_audio(self, sequence_id): return self._get_path_to_sequence_files(sequence_id, "audio").with_suffix(".wav") def _get_path_to_sequence_frames(self, sequence_id): return self._get_path_to_sequence_files(sequence_id, "videos") def _get_path_to_aligned_videos(self, sequence_id): return self._get_path_to_sequence_files(sequence_id, "videos_aligned").with_suffix(".mp4") def _get_path_to_sequence_detections(self, sequence_id): return self._get_path_to_sequence_files(sequence_id, "detections") def _get_landmark_method(self): return "" # for backwards compatibility (AffectNet, ...), the inheriting classes should specify the method def _get_path_to_sequence_landmarks(self, sequence_id, use_aligned_videos=False, landmark_method = None): if self.save_detection_images: # landmarks will be saved wrt to the detection images landmark_subfolder = "landmarks" elif use_aligned_videos: landmark_subfolder = "landmarks_aligned" else: # landmarks will be saved wrt to the original images (not the detection images), # so better put them in a different folder to make it clear landmark_subfolder = "landmarks_original" method = landmark_method or self._get_landmark_method() return self._get_path_to_sequence_files(sequence_id, landmark_subfolder, method=method) def _get_segmentation_method(self): return "" def _get_path_to_sequence_segmentations(self, sequence_id, use_aligned_videos=False, segmentation_net=None): if self.save_detection_images: # landmarks will be saved wrt to the detection images segmentation_subfolder = "segmentations" elif use_aligned_videos: segmentation_subfolder = "segmentations_aligned" else: # landmarks will be saved wrt to the original images (not the detection images), # so better put them in a different folder to make it clear segmentation_subfolder = "segmentations_original" method = segmentation_net or self._get_segmentation_method() return self._get_path_to_sequence_files(sequence_id, segmentation_subfolder, method=method) # return 
self._get_path_to_sequence_files(sequence_id, "segmentations") def _get_path_to_sequence_visualizations(self, sequence_id): return self._get_path_to_sequence_files(sequence_id, "visualizations", assert_=False) # def _get_path_to_sequence_landmarks(self, sequence_id): # return self._get_path_to_sequence_files(sequence_id, "landmarks") # def _get_path_to_sequence_segmentations(self, sequence_id): # return self._get_path_to_sequence_files(sequence_id, "segmentations") def _get_path_to_sequence_emotions(self, sequence_id, emo_method="resnet50"): return self._get_path_to_sequence_files(sequence_id, "emotions", method=emo_method) def _video_category(self, sequence_id): video_file = self.video_list[sequence_id] out_folder = video_file.parts[-4] return out_folder def _video_set(self, sequence_id): video_file = self.video_list[sequence_id] out_folder = video_file.parts[-2] return out_folder def _get_path_to_sequence_reconstructions(self, sequence_id, rec_method='emoca', suffix=''): if suffix is None: suffix = '' if rec_method == 'deca': return self._get_path_to_sequence_files(sequence_id, "reconstructions", "", suffix) # else: elif 'FaceReconstruction' not in rec_method and (rec_method in ['emoca', 'deep3dface', 'spectre'] or \ rec_method.lower().startswith('emoca') or rec_method.lower().startswith('emica')): return self._get_path_to_sequence_files(sequence_id, "reconstructions", rec_method, suffix) else: rec_method_path = Path(rec_method) return self._get_path_to_sequence_files(sequence_id, "reconstructions", rec_method_path.name, suffix) # raise ValueError("Unknown reconstruction method '%s'" % rec_method) # video_file = self.video_list[sequence_id] # if rec_method == 'deca': # suffix = Path(self._video_category(sequence_id)) / f'reconstructions{suffix}' /self._video_set(sequence_id) / video_file.stem # elif rec_method == 'emoca': # suffix = Path(self._video_category(sequence_id)) / f'reconstructions_emoca{suffix}' /self._video_set(sequence_id) / video_file.stem # elif rec_method == 'deep3dface': # suffix = Path(self._video_category(sequence_id)) / f'reconstructions_deep3dface{suffix}' /self._video_set(sequence_id) / video_file.stem # else: # raise ValueError("Unknown reconstruction method '%s'" % rec_method) # out_folder = Path(self.output_dir) / suffix # return out_folder def _get_path_to_sequence_reconstructions_videos(self, sequence_id, rec_method='emoca', suffix=''): return self._get_path_to_sequence_files(sequence_id, "rec_videos", rec_method, suffix) # @profile def _detect_faces_in_sequence(self, sequence_id): # if self.detection_lists is None or len(self.detection_lists) == 0: # self.detection_lists = [ [] for i in range(self.num_sequences)] video_file = self.video_list[sequence_id] print("Detecting faces in sequence: '%s'" % video_file) # suffix = Path(self._video_category(sequence_id)) / 'detections' /self._video_set(sequence_id) / video_file.stem out_detection_folder = self._get_path_to_sequence_detections(sequence_id) out_detection_folder.mkdir(exist_ok=True, parents=True) out_file_boxes = out_detection_folder / "bboxes.pkl" out_landmark_folder = self._get_path_to_sequence_landmarks(sequence_id) out_landmark_folder.mkdir(exist_ok=True, parents=True) if self.save_landmarks_one_file: overwrite = False if not overwrite and (out_landmark_folder / "landmarks.pkl").is_file() and (out_landmark_folder / "landmarks_original.pkl").is_file() and (out_landmark_folder / "landmark_types.pkl").is_file(): print("Files with landmarks already found in '%s'. 
Skipping" % out_landmark_folder) return centers_all = [] sizes_all = [] detection_fnames_all = [] landmark_fnames_all = [] # save_folder = frame_fname.parents[3] / 'detections' # # TODO: resuming is not tested, probably doesn't work yet # checkpoint_frequency = 100 # resume = False # if resume and out_file.exists(): # detection_fnames_all, landmark_fnames_all, centers_all, sizes_all, start_fid = \ # FaceVideoDataModule.load_detections(out_file) # else: # start_fid = 0 # # # hack trying to circumvent memory leaks on the cluster # detector_instantion_frequency = 200 start_fid = 0 if self.unpack_videos: frame_list = self.frame_lists[sequence_id] fid = 0 if len(frame_list) == 0: print("Nothing to detect in: '%s'. All frames have been processed" % self.video_list[sequence_id]) for fid, frame_fname in enumerate(tqdm(range(start_fid, len(frame_list)))): # if fid % detector_instantion_frequency == 0: # self._instantiate_detector(overwrite=True) self._detect_faces_in_image_wrapper(frame_list, fid, out_detection_folder, out_landmark_folder, out_file_boxes, centers_all, sizes_all, detection_fnames_all, landmark_fnames_all) else: num_frames = self.video_metas[sequence_id]['num_frames'] if self.detect_landmarks_on_restored_images is None: video_name = self.root_dir / self.video_list[sequence_id] else: video_name = video_file = self._get_path_to_sequence_restored( sequence_id, method=self.detect_landmarks_on_restored_images) assert video_name.is_file() if start_fid == 0: videogen = vreader(str(video_name)) # videogen = vread(str(video_name)) # for i in range(start_fid): # _discarded_frame = next(videogen # reader = skvideo.io.FFmpegReader(str(video_name)) # num_frames = videogen.getShape()[0] else: videogen = vread(str(video_name)) if self.save_landmarks_one_file: out_landmarks_all = [] # landmarks wrt to the aligned image out_landmarks_original_all = [] # landmarks wrt to the original image out_bbox_type_all = [] else: out_landmarks_all = None out_landmarks_original_all = None out_bbox_type_all = None for fid in tqdm(range(start_fid, num_frames)): try: self._detect_faces_in_image_wrapper(videogen, fid, out_detection_folder, out_landmark_folder, out_file_boxes, centers_all, sizes_all, detection_fnames_all, landmark_fnames_all, out_landmarks_all, out_landmarks_original_all, out_bbox_type_all) except StopIteration as e: print(f"[WARNING] Reached the end of the video. 
Expected number of frames: {num_frames} but the video has only {fid} frames.") break if self.save_landmarks_one_file: # saves all landmarks per video out_file = out_landmark_folder / "landmarks.pkl" FaceVideoDataModule.save_landmark_list(out_file, out_landmarks_all) out_file = out_landmark_folder / "landmarks_original.pkl" FaceVideoDataModule.save_landmark_list(out_file, out_landmarks_original_all) print(f"Landmarks for sequence saved into one file: {out_file}") out_file = out_landmark_folder / "landmark_types.pkl" FaceVideoDataModule.save_landmark_list(out_file, out_bbox_type_all) FaceVideoDataModule.save_detections(out_file_boxes, detection_fnames_all, landmark_fnames_all, centers_all, sizes_all, fid) print("Done detecting faces in sequence: '%s'" % self.video_list[sequence_id]) return # @profile def _detect_landmarkes_in_aligned_sequence(self, sequence_id): video_file = self._get_path_to_aligned_videos(sequence_id) print("Detecting landmarks in aligned sequence: '%s'" % video_file) out_landmark_folder = self._get_path_to_sequence_landmarks(sequence_id, use_aligned_videos=True) out_landmark_folder.mkdir(exist_ok=True, parents=True) if self.save_landmarks_one_file: overwrite = False if not overwrite and (out_landmark_folder / "landmarks.pkl").is_file() and (out_landmark_folder / "landmarks_original.pkl").is_file() and (out_landmark_folder / "landmark_types.pkl").is_file(): print("Files with landmarks already found in '%s'. Skipping" % out_landmark_folder) return # start_fid = 0 if self.unpack_videos: raise NotImplementedError("Not implemented and should not be. Unpacking videos into a sequence of images is pricy.") # frame_list = self.frame_lists[sequence_id] # fid = 0 # if len(frame_list) == 0: # print("Nothing to detect in: '%s'. All frames have been processed" % self.video_list[sequence_id]) # for fid, frame_fname in enumerate(tqdm(range(start_fid, len(frame_list)))): # # if fid % detector_instantion_frequency == 0: # # self._instantiate_detector(overwrite=True) # self._detect_faces_in_image_wrapper(frame_list, fid, out_detection_folder, out_landmark_folder, out_file_boxes, # centers_all, sizes_all, detection_fnames_all, landmark_fnames_all) else: # if start_fid == 0: # videogen = vreader(str(video_name)) videogen = skvideo.io.FFmpegReader(str(video_file)) # videogen = vread(str(video_name)) # for i in range(start_fid): # _discarded_frame = next(videogen) # else: # videogen = vread(str(video_name)) self._detect_landmarks_no_face_detection(videogen, out_landmark_folder) def _detect_landmarks_no_face_detection(self, detection_fnames_or_ims, out_landmark_folder, path_depth = 0): """ Just detects landmarks without face detection. The images should already be cropped to the face. 
""" if self.save_landmarks_one_file: overwrite = False single_out_file = out_landmark_folder / "landmarks.pkl" if single_out_file.is_file() and not overwrite: print(f"Landmarks already found in {single_out_file}, skipping") return device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print(device) # net, landmark_type, batch_size = self._get_segmentation_net(device) # if self.save_detection_images: # ref_im = imread(detection_fnames_or_ims[0]) # else: # ref_im = detection_fnames_or_ims[0] # ref_size = Resize((ref_im.shape[0], ref_im.shape[1]), interpolation=Image.NEAREST) # ref_size = None optimal_landmark_detector_size = self.face_detector.optimal_landmark_detector_im_size() transforms = Compose([ Resize((optimal_landmark_detector_size, optimal_landmark_detector_size)), # Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ]) # transforms=None batch_size = 64 if isinstance(detection_fnames_or_ims, types.GeneratorType): im_read = "skvreader" elif isinstance(detection_fnames_or_ims, (skvideo.io.FFmpegReader)): im_read = "skvffmpeg" else: im_read = 'pil' if not isinstance(detection_fnames_or_ims[0], np.ndarray) else None dataset = UnsupervisedImageDataset(detection_fnames_or_ims, image_transforms=transforms, im_read=im_read) num_workers = 4 if im_read not in ["skvreader", "skvffmpeg"] else 1 # videos can only be read on 1 thread frame by frame loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False) # import matplotlib.pyplot as plt if self.save_landmarks_one_file: # out_landmark_names = [] out_landmarks = [] out_landmark_types = [] out_landmarks_scores = [] for i, batch in enumerate(tqdm(loader)): # facenet_pytorch expects this stanadrization for the input to the net # images = fixed_image_standardization(batch['image'].to(device)) images = batch['image'].cuda() # start = time.time() with torch.no_grad(): landmarks, landmark_scores = self.face_detector.landmarks_from_batch_no_face_detection(images) # end = time.time() # import matplotlib.pyplot as plt # plt.imshow(images[0].cpu().numpy().transpose(1,2,0)) # # plot the landmark points # plt.scatter(landmarks[0, :, 0] * images.shape[3], landmarks[0, :, 1] * images.shape[2], s=10, marker='.', c='r') # plt.show() if self.save_landmarks_frame_by_frame: start = time.time() for j in range(landmarks.shape[0]): image_path = batch['path'][j] # if isinstance(out_segmentation_folder, list): if path_depth > 0: rel_path = Path(image_path).parent.relative_to(Path(image_path).parents[path_depth]) landmark_path = out_landmark_folder / rel_path / (Path(image_path).stem + ".pkl") else: landmark_path = out_landmark_folder / (Path(image_path).stem + ".pkl") landmark_path.parent.mkdir(exist_ok=True, parents=True)
save_landmark_v2(landmark_path, landmarks[j], landmark_scores[j], self.face_detector.landmark_type())
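The save_landmark_v2 call above stores one pickle per detection; per its context snippet it dumps the landmark type, then the confidence scores, then the landmark array, in that order. A matching reader, written here as a sketch rather than taken from the library:

import pickle as pkl

def read_landmark_v2(fname):
    with open(fname, "rb") as f:
        landmark_type = pkl.load(f)
        landmark_confidence = pkl.load(f)
        landmark = pkl.load(f)
    return landmark_type, landmark_confidence, landmark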
16
2023-11-07 20:13:32+00:00
24k
google-research/semivl
model/builder.py
[ { "identifier": "TIMMVisionTransformer", "path": "model/backbone/timm_vit.py", "snippet": "class TIMMVisionTransformer(nn.Module):\n\n def __init__(\n self,\n variant,\n timm_load_pretrained,\n drop_path_rate,\n img_size,\n out_indices,\n ):\n super(TIMMVisionTransformer, self).__init__()\n self.m = timm.create_model(\n variant,\n pretrained=timm_load_pretrained,\n drop_path_rate=drop_path_rate,\n img_size=img_size,\n )\n self.patch_size = self.m.patch_embed.patch_size\n self.img_size = img_size\n self.out_indices = out_indices\n assert max(self.out_indices) <= 11\n\n def forward_features(self, x):\n feats = []\n x = self.m.patch_embed(x)\n x = self.m._pos_embed(x)\n if self.m.grad_checkpointing and not torch.jit.is_scripting():\n raise ValueError(self.m.grad_checkpointing)\n # x = checkpoint_seq(self.blocks, x)\n else:\n for i, block in enumerate(self.m.blocks):\n x = block(x)\n if i in self.out_indices:\n out = self.m.norm(x)\n feats.append(out)\n x = self.m.norm(x)\n return x, feats\n\n def forward(self, x: torch.Tensor):\n if x.shape[-2] != self.m.patch_embed.img_size[0] or x.shape[-1] != self.m.patch_embed.img_size[1]:\n assert not self.training\n x = F.interpolate(x, size=self.img_size, mode='bilinear', align_corners=False)\n B, _, H, W = x.shape\n H = H // self.patch_size[0]\n W = W // self.patch_size[1]\n\n x, feats = self.forward_features(x)\n outs = [\n tuple([f[:, 1:].reshape(B, H, W, -1).permute(0, 3, 1, 2) for f in feats]),\n x[:, 0], # cls_token\n ]\n\n return outs" }, { "identifier": "DLV3PHead", "path": "model/decode_heads/dlv3p_head.py", "snippet": "class DLV3PHead(BaseDecodeHead):\n\n def __init__(self, c1_in_channels, c1_channels, dilations, img_size, **kwargs):\n super(DLV3PHead, self).__init__(**kwargs)\n self.image_size = img_size\n self.aspp = ASPPModule(self.in_channels, dilations)\n self.c1_proj = nn.Sequential(\n nn.Conv2d(c1_in_channels, c1_channels, 1, bias=False),\n nn.BatchNorm2d(c1_channels),\n nn.ReLU(True))\n fuse_channels = self.in_channels // 8 + c1_channels\n self.head = nn.Sequential(\n nn.Conv2d(fuse_channels, 256, 3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n nn.Conv2d(256, 256, 3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n nn.Conv2d(256, self.num_classes, 1, bias=True))\n self.conv_seg = None\n\n def forward(self, inputs, force_output_pred_masks=False):\n if force_output_pred_masks:\n inputs = inputs[0][0]\n assert len(inputs) == 2\n c1, c4 = inputs[0], inputs[1]\n\n c4 = self.aspp(c4)\n c1 = self.c1_proj(c1)\n c4 = F.interpolate(c4, size=c1.shape[-2:], mode=\"bilinear\", align_corners=self.align_corners)\n x = torch.cat([c1, c4], dim=1)\n out = self.head(x)\n\n if force_output_pred_masks:\n out = F.interpolate(out, size=(self.image_size, self.image_size),\n mode='bilinear', align_corners=self.align_corners)\n out = {\"pred_masks\": out}\n\n return out" }, { "identifier": "VLGHead", "path": "model/decode_heads/vlg_head.py", "snippet": "class VLGHead(nn.Module):\n def __init__(self,\n img_size,\n num_classes,\n text_in_channels,\n text_channels,\n up_channels,\n skip_in_channels,\n skip_channels,\n skip_from_conv_feat,\n num_layers,\n num_heads,\n channels,\n pool_size,\n conv1_ksize,\n loss_decode,\n align_corners,\n ) -> None:\n super().__init__()\n self.image_size = img_size\n self.num_classes = num_classes\n self.align_corners = align_corners\n self.text_in_channels = text_in_channels\n self.num_layers = num_layers\n self.channels = channels\n self.skip_from_conv_feat = skip_from_conv_feat\n assert 
loss_decode is None\n\n self.conv1 = nn.Conv2d(1, channels, kernel_size=conv1_ksize, stride=1, padding=(conv1_ksize-1)//2)\n self.aspp = ASPPModule(channels)\n self.layers = nn.ModuleList([\n SemanticTransformer(\n channels=channels, text_channels=text_channels, num_heads=num_heads, pool_size=pool_size\n ) for _ in range(num_layers)\n ])\n\n self.text_proj = nn.Sequential(\n nn.Linear(text_in_channels, text_channels),\n nn.ReLU())\n\n self.skip_proj = nn.ModuleList([\n nn.Sequential(\n nn.Conv2d(sic, sc, kernel_size=3, stride=1, padding=1),\n nn.ReLU(),\n ) for sic, sc in zip(skip_in_channels, skip_channels)\n ])\n\n self.up1 = Up(channels, up_channels[0], skip_channels[0])\n self.up2 = Up(up_channels[0], up_channels[1], skip_channels[1])\n self.head = nn.Conv2d(up_channels[1], 1, kernel_size=3, stride=1, padding=1)\n\n def forward(self, inputs, force_output_pred_masks=False):\n inputs_both = inputs\n img_feat_pyramid = inputs_both[0][0]\n img_feats = img_feat_pyramid[-1]\n if self.skip_from_conv_feat:\n conv_feats = inputs_both[2]\n if len(img_feat_pyramid) > 1:\n skip_feats = [\n *img_feat_pyramid[:-1][::-1],\n *conv_feats[::-1],\n ]\n else:\n skip_feats = conv_feats[::-1]\n assert len(self.skip_proj) == len(skip_feats)\n else:\n skip_feats = img_feat_pyramid[:-1][::-1]\n text_feats = inputs_both[1]\n\n text_feats = text_feats.repeat(img_feats.shape[0], 1, 1).float()\n B, C, H, W = img_feats.shape\n assert list(text_feats.shape) == [B, self.num_classes, C]\n\n # Compute Similarity Map\n img_feats = F.normalize(img_feats, dim=1)\n text_feats = F.normalize(text_feats, dim=-1)\n x = torch.einsum('bchw, bnc -> bnhw', img_feats, text_feats)\n\n # Spatial Reasoning\n x = rearrange(x, 'b n h w -> (b n) () h w')\n x = self.conv1(x)\n x = self.aspp(x)\n x = rearrange(x, '(b n) c h w -> b c n h w', b=B)\n\n # Semantic Reasoning\n if self.text_proj is not None:\n text_feats = self.text_proj(text_feats)\n\n for layer in self.layers:\n x = layer(x, text_feats)\n\n # Upsampling\n if self.skip_proj is not None:\n skip_feats = [proj(f) for proj, f in zip(self.skip_proj, skip_feats)]\n\n x = rearrange(x, 'b c n h w -> (b n) c h w')\n x = self.up1(x, skip_feats[0])\n x = self.up2(x, skip_feats[1])\n x = self.head(x)\n x = rearrange(x, '(b n) () h w -> b n h w', b=B)\n\n if x.shape[1] != self.num_classes:\n cls2con = get_class_to_concept_idxs(self.load_text_embedding)\n x = aggregate_concept_predictions(x, cls2con)\n\n if force_output_pred_masks:\n x = F.interpolate(x, size=(self.image_size, self.image_size),\n mode='bilinear', align_corners=self.align_corners)\n x = {\"pred_masks\": x}\n\n return x" }, { "identifier": "VLM", "path": "model/vlm.py", "snippet": "class VLM(EncoderDecoder):\n def __init__(self,\n freeze_backbone=False,\n exclude_keys=None,\n load_text_embedding=None,\n load_mcc_text_embedding=None,\n load_pl_text_embedding=None,\n clip_encoder=None,\n conv_encoder=None,\n maskclip_class_filter=None,\n maskclip_trust_head=None,\n renorm_clip_img=False,\n **args):\n super(VLM, self).__init__(**args)\n assert load_text_embedding == load_pl_text_embedding\n assert maskclip_class_filter is None\n assert maskclip_trust_head is None\n self.local_iter = 0\n\n self.clip_encoder = None\n if clip_encoder is not None:\n self.clip_encoder = builder.build_backbone(clip_encoder)\n self.conv_encoder = None\n if conv_encoder is not None:\n self.conv_encoder = builder.build_backbone(conv_encoder)\n\n self.load_text_embedding = load_text_embedding\n self.decode_head.load_text_embedding = load_text_embedding\n 
self.load_mcc_text_embedding = load_mcc_text_embedding\n self.renorm_clip_img = renorm_clip_img\n if renorm_clip_img:\n print('Renormalize clip image.')\n if self.load_mcc_text_embedding:\n self.loaded_mcc_text_feat = np.load(self.load_mcc_text_embedding)\n self.loaded_mcc_text_feat = torch.from_numpy(self.loaded_mcc_text_feat).float()\n else:\n raise NotImplementedError\n\n if freeze_backbone:\n self.freeze(self.backbone, exclude_keys=exclude_keys)\n\n def renormalize_img_for_clip(self, img):\n if not self.renorm_clip_img:\n return img\n loader_mean, loader_std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n clip_mean, clip_std = [0.48145466, 0.4578275, 0.40821073], [0.26862954, 0.26130258, 0.27577711]\n loader_mean = torch.tensor(loader_mean, device=img.device).view(1, -1, 1, 1)\n loader_std = torch.tensor(loader_std, device=img.device).view(1, -1, 1, 1)\n clip_mean = torch.tensor(clip_mean, device=img.device).view(1, -1, 1, 1)\n clip_std = torch.tensor(clip_std, device=img.device).view(1, -1, 1, 1)\n return (img * loader_std + loader_mean - clip_mean) / clip_std\n\n def freeze(self, model, exclude_keys=None):\n for n, m in model.named_parameters():\n m.requires_grad = False\n if exclude_keys is not None:\n assert isinstance(exclude_keys, list)\n for k in exclude_keys:\n if str(k) in n:\n m.requires_grad = True\n print(f'Finetune {n}')\n \n def forward_maskclip(self, img, conf_tresh):\n img = self.renormalize_img_for_clip(img)\n self.clip_encoder.eval()\n with torch.no_grad():\n text_feat = self.loaded_mcc_text_feat.detach().to(img.device)\n visual_feat, _ = self.clip_encoder(img)\n visual_feat = visual_feat[-1]\n\n dense_pred = F.conv2d(visual_feat, text_feat[:, :, None, None])\n if dense_pred.shape[1] != self.num_classes:\n cls2con = get_class_to_concept_idxs(self.load_mcc_text_embedding)\n dense_pred = aggregate_concept_predictions(dense_pred, cls2con)\n assert dense_pred.shape[1] == self.num_classes\n dense_pred = F.interpolate(dense_pred, size=img.shape[-2:],\n mode='bilinear', align_corners=self.decode_head.align_corners)\n dense_pred = (100.0 * dense_pred).softmax(dim=1)\n dense_pred_certainty, dense_pred = dense_pred.max(dim=1)\n\n filtered_dense_pred = dense_pred.clone()\n filtered_dense_pred[dense_pred_certainty < conf_tresh] = 255\n return filtered_dense_pred\n\n def extract_feat(self, img):\n orig_img = img\n img = self.renormalize_img_for_clip(img)\n visual_feat = self.backbone(img)\n text_feat = np.load(self.load_text_embedding)\n text_feat = torch.from_numpy(text_feat).to(img.device)\n self.decode_head.load_text_embedding = self.load_text_embedding\n conv_feat = None\n if self.conv_encoder is not None:\n conv_feat = self.conv_encoder(orig_img)\n\n return [visual_feat, text_feat, conv_feat]\n\n def _decode_head_forward_test(self, x, img_metas):\n seg_logits = self.decode_head.forward(x, force_output_pred_masks=True)['pred_masks']\n return seg_logits" }, { "identifier": "MaskClipVisionTransformer", "path": "third_party/maskclip/models/backbones/maskclip_vit.py", "snippet": "class MaskClipVisionTransformer(BaseModule):\n \"\"\"Vision Transformer.\n\n This backbone is the implementation of `An Image is Worth 16x16 Words:\n Transformers for Image Recognition at\n Scale <https://arxiv.org/abs/2010.11929>`_.\n\n Args:\n img_size (int | tuple): Input image size. Default: 224.\n patch_size (int): The patch size. Default: 16.\n in_channels (int): Number of input channels. Default: 3.\n embed_dims (int): embedding dimension. Default: 768.\n num_layers (int): depth of transformer. 
Default: 12.\n num_heads (int): number of attention heads. Default: 12.\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim.\n Default: 4.\n out_indices (list | tuple | int): Output from which stages.\n Default: -1.\n qkv_bias (bool): enable bias for qkv if True. Default: True.\n drop_rate (float): Probability of an element to be zeroed.\n Default 0.0\n attn_drop_rate (float): The drop out rate for attention layer.\n Default 0.0\n drop_path_rate (float): stochastic depth rate. Default 0.0\n with_cls_token (bool): Whether concatenating class token into image\n tokens as transformer input. Default: True.\n output_cls_token (bool): Whether output the cls_token. If set True,\n `with_cls_token` must be True. Default: False.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='LN')\n act_cfg (dict): The activation config for FFNs.\n Default: dict(type='GELU').\n patch_norm (bool): Whether to add a norm in PatchEmbed Block.\n Default: False.\n final_norm (bool): Whether to add a additional layer to normalize\n final feature map. Default: False.\n interpolate_mode (str): Select the interpolate mode for position\n embeding vector resize. Default: bicubic.\n num_fcs (int): The number of fully-connected layers for FFNs.\n Default: 2.\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: False.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save\n some memory while slowing down the training speed. Default: False.\n pretrained (str, optional): model pretrained path. Default: None.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n Default: None.\n \"\"\"\n\n def __init__(self,\n img_size=224,\n patch_size=16,\n patch_bias=True,\n in_channels=3,\n embed_dims=768,\n num_layers=12,\n num_heads=12,\n mlp_ratio=4,\n out_indices=-1,\n qkv_bias=True,\n drop_rate=0.,\n attn_drop_rate=0.,\n drop_path_rate=0.,\n with_cls_token=True,\n output_cls_token=False,\n norm_cfg=dict(type='LN'),\n act_cfg=dict(type='GELU'),\n patch_norm=False,\n pre_norm=False,\n final_norm=False,\n return_qkv=False,\n return_clip_embed=False,\n skip_last_attn=False,\n interpolate_mode='bicubic',\n num_fcs=2,\n norm_eval=False,\n with_cp=False,\n pretrained=None,\n num_prompt_tokens=None,\n lora_layers=[],\n lora_r=4,\n lora_scaling=1,\n lora_dropout=0,\n lora_targets='qkvo',\n init_cfg=None):\n super(MaskClipVisionTransformer, self).__init__(init_cfg=init_cfg)\n\n if isinstance(img_size, int):\n img_size = to_2tuple(img_size)\n elif isinstance(img_size, tuple):\n if len(img_size) == 1:\n img_size = to_2tuple(img_size[0])\n assert len(img_size) == 2, \\\n f'The size of image should have length 1 or 2, ' \\\n f'but got {len(img_size)}'\n\n if output_cls_token:\n assert with_cls_token is True, f'with_cls_token must be True if' \\\n f'set output_cls_token to True, but got {with_cls_token}'\n\n assert not (init_cfg and pretrained), \\\n 'init_cfg and pretrained cannot be set at the same time'\n if isinstance(pretrained, str):\n warnings.warn('DeprecationWarning: pretrained is deprecated, '\n 'please use \"init_cfg\" instead')\n self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n elif pretrained is not None:\n raise TypeError('pretrained must be a str or None')\n\n self.img_size = img_size\n self.patch_size = patch_size\n self.interpolate_mode = interpolate_mode\n self.norm_eval = norm_eval\n self.with_cp = with_cp\n self.pretrained = pretrained\n 
self.num_prompt_tokens = num_prompt_tokens\n\n self.patch_embed = PatchEmbed(\n in_channels=in_channels,\n embed_dims=embed_dims,\n conv_type='Conv2d',\n kernel_size=patch_size,\n stride=patch_size,\n padding='corner',\n bias=patch_bias,\n norm_cfg=norm_cfg if patch_norm else None,\n init_cfg=None,\n )\n\n num_patches = (img_size[0] // patch_size) * \\\n (img_size[1] // patch_size)\n\n self.with_cls_token = with_cls_token\n self.output_cls_token = output_cls_token\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims))\n self.pos_embed = nn.Parameter(\n torch.zeros(1, num_patches + 1, embed_dims))\n self.drop_after_pos = nn.Dropout(p=drop_rate)\n\n if out_indices is None:\n self.out_indices = [num_layers]\n elif isinstance(out_indices, int):\n if out_indices == -1:\n out_indices = num_layers - 1\n self.out_indices = [out_indices]\n elif isinstance(out_indices, list) or isinstance(out_indices, tuple):\n self.out_indices = out_indices\n else:\n raise TypeError('out_indices must be type of int, list or tuple')\n\n dpr = [\n x.item() for x in torch.linspace(0, drop_path_rate, num_layers)\n ] # stochastic depth decay rule\n\n self.layers = ModuleList()\n for i in range(num_layers):\n self.layers.append(\n TransformerEncoderLayer(\n embed_dims=embed_dims,\n num_heads=num_heads,\n feedforward_channels=mlp_ratio * embed_dims,\n attn_drop_rate=attn_drop_rate,\n drop_rate=drop_rate,\n drop_path_rate=dpr[i],\n num_fcs=num_fcs,\n qkv_bias=qkv_bias,\n act_cfg=act_cfg,\n norm_cfg=norm_cfg,\n lora=(i in lora_layers),\n lora_r=lora_r,\n lora_scaling=lora_scaling,\n lora_dropout=lora_dropout,\n lora_targets=lora_targets,\n batch_first=True))\n\n self.pre_norm = pre_norm\n if pre_norm:\n self.norm0_name, norm0 = build_norm_layer(\n norm_cfg, embed_dims, postfix=0)\n self.add_module(self.norm0_name, norm0)\n\n self.final_norm = final_norm\n if final_norm:\n self.norm1_name, norm1 = build_norm_layer(\n norm_cfg, embed_dims, postfix=1)\n self.add_module(self.norm1_name, norm1)\n\n self.return_clip_embed = return_clip_embed\n if self.return_clip_embed:\n self.proj = nn.Conv2d(embed_dims, 512, 1, bias=False)\n self.add_module('proj', self.proj)\n\n self.return_qkv = [False] * num_layers\n if isinstance(return_qkv, bool):\n for out_i in self.out_indices:\n if out_i >= num_layers:\n continue\n self.return_qkv[out_i] = return_qkv\n elif isinstance(return_qkv, list) or isinstance(return_qkv, tuple):\n for i, out_i in enumerate(self.out_indices):\n if out_i >= num_layers:\n continue\n self.return_qkv[out_i] = return_qkv[i]\n else:\n raise TypeError('return_qkv must be type of bool, list or tuple')\n if self.return_clip_embed:\n self.return_qkv[num_layers - 1] = True\n\n self.skip_last_attn = skip_last_attn\n\n if self.num_prompt_tokens is not None:\n val = math.sqrt(6. 
/ float(3 * reduce(mul, (patch_size, patch_size), 1) + embed_dims))\n\n self.deep_prompt_embeddings = nn.Parameter(torch.zeros(num_layers, self.num_prompt_tokens, embed_dims))\n nn.init.uniform_(self.deep_prompt_embeddings.data, -val, val)\n\n self.prompt_proj = nn.Linear(embed_dims, embed_dims)\n nn.init.kaiming_normal_(self.prompt_proj.weight, a=0, mode='fan_out')\n self.prompt_norm = LayerNorm(embed_dims, eps=1e-6)\n self.prompt_dropout = nn.Dropout(0.1)\n\n @property\n def norm0(self):\n return getattr(self, self.norm0_name)\n \n @property\n def norm1(self):\n return getattr(self, self.norm1_name)\n\n def init_weights(self):\n if (isinstance(self.init_cfg, dict)\n and self.init_cfg.get('type') == 'Pretrained'):\n logger = get_root_logger()\n checkpoint = _load_checkpoint(\n self.init_cfg['checkpoint'], logger=logger, map_location='cpu')\n\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n else:\n state_dict = checkpoint\n logger.info(msg='Remove backbone prefix from state_dict.')\n state_dict = {k.replace('backbone.', ''): v for k, v in state_dict.items()}\n\n if 'pos_embed' in state_dict.keys():\n if self.pos_embed.shape != state_dict['pos_embed'].shape:\n logger.info(msg=f'Resize the pos_embed shape from '\n f'{state_dict[\"pos_embed\"].shape} to '\n f'{self.pos_embed.shape}')\n h, w = self.img_size\n pos_size = int(\n math.sqrt(state_dict['pos_embed'].shape[1] - 1))\n state_dict['pos_embed'] = self.resize_pos_embed(\n state_dict['pos_embed'],\n (h // self.patch_size, w // self.patch_size),\n (pos_size, pos_size), self.interpolate_mode)\n\n if self.return_clip_embed:\n state_dict['proj.weight'] = state_dict['proj.weight'][:, :, None, None]\n else:\n state_dict.pop('proj.weight')\n\n print(self.load_state_dict(state_dict, False))\n elif self.init_cfg is not None:\n super(MaskClipVisionTransformer, self).init_weights()\n else:\n # We only implement the 'jax_impl' initialization implemented at\n # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py#L353 # noqa: E501\n trunc_normal_(self.pos_embed, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n for n, m in self.named_modules():\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if m.bias is not None:\n if 'ffn' in n:\n nn.init.normal_(m.bias, mean=0., std=1e-6)\n else:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Conv2d):\n kaiming_init(m, mode='fan_in', bias=0.)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)):\n constant_init(m, val=1.0, bias=0.)\n\n def _pos_embeding(self, patched_img, hw_shape, pos_embed):\n \"\"\"Positiong embeding method.\n\n Resize the pos_embed, if the input image size doesn't match\n the training size.\n Args:\n patched_img (torch.Tensor): The patched image, it should be\n shape of [B, L1, C].\n hw_shape (tuple): The downsampled image resolution.\n pos_embed (torch.Tensor): The pos_embed weighs, it should be\n shape of [B, L2, c].\n Return:\n torch.Tensor: The pos encoded image feature.\n \"\"\"\n assert patched_img.ndim == 3 and pos_embed.ndim == 3, \\\n 'the shapes of patched_img and pos_embed must be [B, L, C]'\n x_len, pos_len = patched_img.shape[1], pos_embed.shape[1]\n if x_len != pos_len:\n if pos_len == (self.img_size[0] // self.patch_size) * (\n self.img_size[1] // self.patch_size) + 1:\n pos_h = self.img_size[0] // self.patch_size\n pos_w = self.img_size[1] // self.patch_size\n else:\n raise ValueError(\n 'Unexpected shape of pos_embed, got {}.'.format(\n pos_embed.shape))\n pos_embed = 
self.resize_pos_embed(pos_embed, hw_shape,\n (pos_h, pos_w),\n self.interpolate_mode)\n return self.drop_after_pos(patched_img + pos_embed)\n\n @staticmethod\n def resize_pos_embed(pos_embed, input_shpae, pos_shape, mode):\n \"\"\"Resize pos_embed weights.\n\n Resize pos_embed using bicubic interpolate method.\n Args:\n pos_embed (torch.Tensor): Position embedding weights.\n input_shpae (tuple): Tuple for (downsampled input image height,\n downsampled input image width).\n pos_shape (tuple): The resolution of downsampled origin training\n image.\n mode (str): Algorithm used for upsampling:\n ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |\n ``'trilinear'``. Default: ``'nearest'``\n Return:\n torch.Tensor: The resized pos_embed of shape [B, L_new, C]\n \"\"\"\n assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'\n pos_h, pos_w = pos_shape\n cls_token_weight = pos_embed[:, 0]\n pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]\n pos_embed_weight = pos_embed_weight.reshape(\n 1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2)\n pos_embed_weight = resize(\n pos_embed_weight, size=input_shpae, align_corners=False, mode=mode)\n cls_token_weight = cls_token_weight.unsqueeze(1)\n pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2)\n pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1)\n return pos_embed\n\n def forward(self, inputs):\n B = inputs.shape[0]\n\n x, hw_shape = self.patch_embed(inputs)\n\n # stole cls_tokens impl from Phil Wang, thanks\n cls_tokens = self.cls_token.expand(B, -1, -1)\n x = torch.cat((cls_tokens, x), dim=1)\n x = self._pos_embeding(x, hw_shape, self.pos_embed)\n\n if not self.with_cls_token:\n # Remove class token for transformer encoder input\n x = x[:, 1:]\n\n if self.pre_norm:\n x = self.norm0(x)\n\n outs = []\n for i, layer in enumerate(self.layers):\n # add deep prompt\n if self.num_prompt_tokens is not None:\n deep_prompt_emb = self.prompt_dropout(self.prompt_proj(self.deep_prompt_embeddings[i]).expand(B, -1, -1))\n assert self.with_cls_token\n assert x.shape[1] == 1 + hw_shape[0] * hw_shape[1], x.shape\n x = torch.cat((\n x[:, :1, :],\n deep_prompt_emb,\n x[:, 1:, :]\n ), dim=1)\n assert x.shape[1] == 1 + self.num_prompt_tokens + hw_shape[0] * hw_shape[1], x.shape\n x, q, k, v = layer(x, self.return_qkv[i] \\\n or (i==len(self.layers)-1 and self.skip_last_attn))\n # remove deep prompt\n if self.num_prompt_tokens is not None:\n x = torch.cat((\n x[:, :1, :],\n x[:, 1+self.num_prompt_tokens:, :]\n ), dim=1)\n assert x.shape[1] == 1 + hw_shape[0] * hw_shape[1]\n if v is not None:\n v = torch.cat((\n v[:, :1, :],\n v[:, 1+self.num_prompt_tokens:, :]\n ), dim=1)\n assert v.shape[1] == 1 + hw_shape[0] * hw_shape[1]\n if i == len(self.layers) - 1:\n if self.final_norm:\n x = self.norm1(x)\n if self.return_qkv[i]:\n v = self.norm1(v)\n if self.skip_last_attn:\n if self.with_cls_token:\n x[:, 1:] = v[:, 1:]\n else:\n x = v\n if self.return_clip_embed:\n visual_embedding = v\n if self.with_cls_token:\n visual_embedding = visual_embedding[:, 1:]\n B, _, C = visual_embedding.shape\n visual_embedding = visual_embedding.reshape(B, hw_shape[0], hw_shape[1],\n C).permute(0, 3, 1, 2).contiguous()\n visual_embedding = self.proj(visual_embedding)\n visual_embedding = visual_embedding / visual_embedding.norm(dim=1, keepdim=True)\n if i in self.out_indices:\n if self.with_cls_token:\n # Remove class token and reshape token for decoder head\n out = x[:, 1:]\n else:\n out = x\n B, _, C = out.shape\n out = 
out.reshape(B, hw_shape[0], hw_shape[1],\n C).permute(0, 3, 1, 2).contiguous()\n if self.output_cls_token:\n out = [out, x[:, 0]]\n if self.return_qkv[i]:\n if self.with_cls_token:\n q = q[:, 1:]\n k = k[:, 1:]\n v = v[:, 1:]\n v = v.reshape(B, hw_shape[0], hw_shape[1],\n C).permute(0, 3, 1, 2).contiguous()\n out = [out, q, k, v]\n outs.append(out)\n\n if self.return_clip_embed:\n features = []\n for o in outs:\n if isinstance(o, list):\n # from return_qkv\n assert len(o) == 4\n features.append(o[3])\n else:\n features.append(o)\n if len(self.layers) in self.out_indices:\n features.append(visual_embedding)\n global_embedding = self.proj(x[:, 0][:, :, None, None])[:, :, 0, 0]\n global_embedding = global_embedding / global_embedding.norm(dim=1, keepdim=True)\n\n outs = [\n tuple(features),\n global_embedding\n ]\n\n return outs\n\n def train(self, mode=True):\n super(MaskClipVisionTransformer, self).train(mode)\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, nn.LayerNorm):\n m.eval()" }, { "identifier": "MaskClip2Head", "path": "third_party/maskclip/models/decode_heads/maskclip2_head.py", "snippet": "class MaskClip2Head(BaseDecodeHead):\n\n def __init__(self, img_size, **kwargs):\n super(MaskClip2Head, self).__init__(**kwargs)\n self.img_size = img_size\n\n def forward(self, inputs, force_output_pred_masks=False):\n assert force_output_pred_masks\n inputs_both = inputs\n inputs = inputs_both[0][0]\n cls_token = inputs_both[0][1]\n txt_embed = inputs_both[1]\n feat = inputs[-1]\n\n output = self.cls_seg(feat, txt_embed)\n\n output = F.interpolate(output, size=(self.img_size, self.img_size),\n mode='bilinear', align_corners=self.align_corners)\n output = {\"pred_masks\": output}\n\n return output\n\n def cls_seg(self, feat, txt_embed):\n txt_embed = txt_embed.to(feat.dtype)\n output = F.conv2d(feat, txt_embed[:, :, None, None])\n \n return output\n\n def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):\n raise RuntimeError('MaskClip is not trainable. 
Try MaskClip+ instead.')" }, { "identifier": "MaskClipHead", "path": "third_party/maskclip/models/decode_heads/maskclip_head.py", "snippet": "class MaskClipHead(BaseDecodeHead):\n\n def __init__(self, text_categories, text_channels, text_embeddings_path,\n visual_projs_path, vit=False, ks_thresh=0., pd_thresh=0.,\n attn_pooling=False, num_heads=32, **kwargs):\n super(MaskClipHead, self).__init__(**kwargs)\n\n self.text_categories = text_categories\n self.text_channels = text_channels\n self.text_embeddings_path = text_embeddings_path\n self.visual_projs_path = visual_projs_path\n\n if self.text_embeddings_path is None:\n self.text_embeddings = nn.Parameter(torch.zeros(text_categories, text_channels))\n nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)\n else:\n self.register_buffer('text_embeddings', torch.randn(text_categories, text_channels))\n self.load_text_embeddings()\n \n self.vit = vit\n if vit:\n self.proj = nn.Conv2d(self.in_channels, text_channels, 1, bias=False)\n else:\n self.q_proj = nn.Conv2d(self.in_channels, self.in_channels, 1)\n self.k_proj = nn.Conv2d(self.in_channels, self.in_channels, 1)\n self.v_proj = nn.Conv2d(self.in_channels, self.in_channels, 1)\n self.c_proj = nn.Conv2d(self.in_channels, text_channels, 1)\n self.load_visual_projs()\n\n self.ks_thresh = ks_thresh\n self.pd_thresh = pd_thresh\n self.attn_pooling = attn_pooling\n self.num_heads = num_heads\n\n def init_weights(self):\n super(MaskClipHead, self).init_weights()\n if self.text_embeddings_path is None:\n nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)\n else:\n self.load_text_embeddings()\n self.load_visual_projs()\n\n def load_text_embeddings(self):\n loaded = torch.load(self.text_embeddings_path, map_location='cuda')\n self.text_embeddings[:, :] = loaded[:, :]\n print_log(f'Loaded text embeddings from {self.text_embeddings_path}', logger=get_root_logger())\n\n def load_visual_projs(self):\n loaded = torch.load(self.visual_projs_path, map_location='cuda')\n attrs = ['proj'] if self.vit else ['q_proj', 'k_proj', 'v_proj', 'c_proj']\n for attr in attrs:\n current_attr = getattr(self, attr)\n state_dict = loaded[attr]\n for key in state_dict:\n if 'weight' in key:\n state_dict[key] = state_dict[key][:, :, None, None]\n current_attr.load_state_dict(state_dict)\n print_log(f'Loaded proj weights from {self.visual_projs_path}', logger=get_root_logger())\n \n def forward(self, inputs):\n x = self._transform_inputs(inputs)\n q, k, v, cls_token = None, None, None, None\n if self.vit:\n if isinstance(x, list) and len(x) == 4:\n x, q, k, v = x\n if isinstance(x, list) and len(x) == 2:\n x, cls_token = x\n if v is not None:\n feat = self.proj(v)\n else:\n feat = self.proj(x)\n if cls_token is not None:\n cls_token = self.proj(cls_token[:, :, None, None])[:, :, 0, 0]\n else:\n if self.attn_pooling:\n N, C, H, W = x.shape\n x = x.view(N, C, -1).permute(2, 0, 1) # NCHW -> (HW)NC\n x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)\n x, _ = F.multi_head_attention_forward(\n query=x, key=x, value=x,\n embed_dim_to_check=x.shape[-1],\n num_heads=self.num_heads,\n q_proj_weight=self.q_proj.weight[:, :, 0, 0],\n k_proj_weight=self.k_proj.weight[:, :, 0, 0],\n v_proj_weight=self.v_proj.weight[:, :, 0, 0],\n in_proj_weight=None,\n in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),\n bias_k=None,\n bias_v=None,\n add_zero_attn=False,\n dropout_p=0,\n out_proj_weight=self.c_proj.weight[:, :, 0, 0],\n out_proj_bias=self.c_proj.bias,\n use_separate_proj_weight=True,\n 
training=self.training,\n need_weights=False\n )\n feat = x[1:].permute(1, 2, 0).view(N, -1, H, W)\n else:\n q = self.q_proj(x)\n k = self.k_proj(x)\n q = torch.flatten(q, start_dim=2).transpose(-2, -1)\n k = torch.flatten(k, start_dim=2).transpose(-2, -1)\n v = self.v_proj(x)\n feat = self.c_proj(v)\n output = self.cls_seg(feat)\n if not self.training:\n output = self.refine_output(output, k)\n\n return output\n\n def cls_seg(self, feat):\n feat = feat / feat.norm(dim=1, keepdim=True)\n output = F.conv2d(feat, self.text_embeddings[:, :, None, None])\n \n return output\n\n def refine_output(self, output, k):\n if self.pd_thresh > 0:\n N, C, H, W = output.shape\n _output = F.softmax(output*100, dim=1)\n max_cls_conf = _output.view(N, C, -1).max(dim=-1)[0]\n selected_cls = (max_cls_conf < self.pd_thresh)[:, :, None, None].expand(N, C, H, W)\n output[selected_cls] = -100\n\n if k is not None and self.ks_thresh > 0:\n output = F.softmax(output*100, dim=1)\n N, C, H, W = output.shape\n output = output.view(N, C, -1).transpose(-2, -1)\n # softmax\n # weight = k @ k.transpose(-2, -1)\n # weight = F.softmax(weight, dim=-1)\n # L2 distance\n k = F.normalize(k, p=2)\n weight = k @ k.transpose(-2, -1)\n\n selected_pos = (output.max(dim=-1, keepdim=True)[0] < self.ks_thresh)\n selected_pos = selected_pos.expand(-1, -1, C)\n\n weighted_output = weight @ output\n output[selected_pos] = weighted_output[selected_pos]\n output = output.transpose(-2, -1).view(N, C, H, W)\n\n return output\n\n def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):\n raise RuntimeError('MaskClip is not trainable. Try MaskClip+ instead.')" }, { "identifier": "DeepLabV3Plus", "path": "third_party/unimatch/model/semseg/deeplabv3plus.py", "snippet": "class DeepLabV3Plus(nn.Module):\n def __init__(self, cfg):\n super(DeepLabV3Plus, self).__init__()\n\n if 'resnet' in cfg['backbone']:\n self.backbone = resnet.__dict__[cfg['backbone']](pretrained=True, \n replace_stride_with_dilation=cfg['replace_stride_with_dilation'])\n else:\n assert cfg['backbone'] == 'xception'\n self.backbone = xception(pretrained=True)\n\n low_channels = 256\n high_channels = 2048\n\n self.head = ASPPModule(high_channels, cfg['dilations'])\n\n self.reduce = nn.Sequential(nn.Conv2d(low_channels, 48, 1, bias=False),\n nn.BatchNorm2d(48),\n nn.ReLU(True))\n\n self.fuse = nn.Sequential(nn.Conv2d(high_channels // 8 + 48, 256, 3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n nn.Conv2d(256, 256, 3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(True))\n\n self.classifier = nn.Conv2d(256, cfg['nclass'], 1, bias=True)\n\n def forward(self, x, need_fp=False, only_fp=False):\n h, w = x.shape[-2:]\n\n feats = self.backbone.base_forward(x)\n c1, c4 = feats[0], feats[-1]\n\n if only_fp:\n out_fp = self._decode(nn.Dropout2d(0.5)(c1),\n nn.Dropout2d(0.5)(c4))\n out_fp = F.interpolate(out_fp, size=(h, w), mode=\"bilinear\", align_corners=True)\n return out_fp\n elif need_fp:\n outs = self._decode(torch.cat((c1, nn.Dropout2d(0.5)(c1))),\n torch.cat((c4, nn.Dropout2d(0.5)(c4))))\n outs = F.interpolate(outs, size=(h, w), mode=\"bilinear\", align_corners=True)\n out, out_fp = outs.chunk(2)\n\n return out, out_fp\n\n out = self._decode(c1, c4)\n out = F.interpolate(out, size=(h, w), mode=\"bilinear\", align_corners=True)\n\n return out\n\n def _decode(self, c1, c4):\n c4 = self.head(c4)\n c4 = F.interpolate(c4, size=c1.shape[-2:], mode=\"bilinear\", align_corners=True)\n\n c1 = self.reduce(c1)\n\n feature = torch.cat([c1, c4], 
dim=1)\n feature = self.fuse(feature)\n\n out = self.classifier(feature)\n\n return out" }, { "identifier": "SegLossPlus", "path": "third_party/zegclip/losses/atm_loss.py", "snippet": "class SegLossPlus(nn.Module):\n \"\"\"ATMLoss.\n \"\"\"\n def __init__(self,\n num_classes,\n dec_layers,\n mask_weight=20.0,\n dice_weight=1.0,\n loss_weight=1.0,\n use_point=False):\n super(SegLossPlus, self).__init__()\n weight_dict = {\"loss_mask\": mask_weight, \"loss_dice\": dice_weight}\n aux_weight_dict = {}\n for i in range(dec_layers - 1):\n aux_weight_dict.update({k + f\"_{i}\": v for k, v in weight_dict.items()})\n weight_dict.update(aux_weight_dict)\n\n self.criterion = SegPlusCriterion(\n num_classes,\n weight_dict=weight_dict,\n losses=[\"masks\"],\n )\n\n self.loss_weight = loss_weight\n\n def forward(self,\n outputs,\n label,\n ignore_index=255,\n ):\n \"\"\"Forward function.\"\"\"\n \n self.ignore_index = ignore_index\n targets = self.prepare_targets(label)\n losses = self.criterion(outputs, targets)\n\n for k in list(losses.keys()):\n if k in self.criterion.weight_dict:\n losses[k] = losses[k] * self.criterion.weight_dict[k] * self.loss_weight\n else:\n # remove this loss if not specified in `weight_dict`\n losses.pop(k)\n\n return losses\n\n def prepare_targets(self, targets):\n new_targets = []\n for targets_per_image in targets:\n # gt_cls\n gt_cls = targets_per_image.unique()\n gt_cls = gt_cls[gt_cls != self.ignore_index]\n masks = []\n for cls in gt_cls:\n masks.append(targets_per_image == cls)\n if len(gt_cls) == 0:\n masks.append(targets_per_image == self.ignore_index)\n\n masks = torch.stack(masks, dim=0)\n new_targets.append(\n {\n \"labels\": gt_cls,\n \"target_masks\": masks,\n \"masks\": targets_per_image,\n }\n )\n return new_targets" }, { "identifier": "CLIPVisionTransformer", "path": "third_party/zegclip/models/backbones/clip_vit.py", "snippet": "class CLIPVisionTransformer(nn.Module):\n def __init__(self, input_resolution=224, patch_size=32, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.0,\n out_indices=[3, 5, 7, 11], pretrained=None, get_embeddings=False, embed_v=False, **kwargs):\n super().__init__()\n self.pretrained = pretrained\n self.input_resolution = input_resolution\n self.output_dim = output_dim\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n scale = width ** -0.5\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\n self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))\n self.spatial_size = input_resolution // patch_size\n self.ln_pre = LayerNorm(width)\n self.get_embeddings = get_embeddings\n self.embed_v = embed_v\n\n self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)\n\n self.out_indices = out_indices\n\n if get_embeddings:\n self.ln_post = LayerNorm(width)\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.patch_size = patch_size\n\n def init_weights(self, pretrained=None):\n pretrained = pretrained or self.pretrained\n if isinstance(pretrained, str):\n checkpoint = torch.jit.load(pretrained, map_location='cpu').float().state_dict()\n\n state_dict = {}\n\n for k in checkpoint.keys():\n if k.startswith('visual.'):\n new_k = k.replace('visual.', '')\n state_dict[new_k] = checkpoint[k]\n\n if 'positional_embedding' in state_dict.keys():\n if self.positional_embedding.shape != state_dict['positional_embedding'].shape:\n # (1025, 768) (197, 768) 
upsample the positional_embedding for larger input\n print(f'Resize the pos_embed shape from {state_dict[\"positional_embedding\"].shape} to {self.positional_embedding.shape}')\n cls_pos = state_dict[\"positional_embedding\"][0:1, :]\n if self.patch_size == 16:\n spatial_pos = F.interpolate(state_dict[\"positional_embedding\"][1:,].reshape(1, 14, 14, 768).permute(0, 3, 1, 2), size=(self.spatial_size, self.spatial_size), mode='bilinear')\n elif self.patch_size == 32:\n spatial_pos = F.interpolate(state_dict[\"positional_embedding\"][1:,].reshape(1, 7, 7, 768).permute(0, 3, 1, 2), size=(self.spatial_size, self.spatial_size), mode='bilinear')\n else:\n assert ValueError('Patch Size should be 16 or 32')\n spatial_pos = spatial_pos.reshape(768, self.spatial_size*self.spatial_size).permute(1, 0)\n positional_embedding = torch.cat([cls_pos, spatial_pos], dim=0)\n state_dict['positional_embedding'] = positional_embedding\n assert self.positional_embedding.shape == state_dict['positional_embedding'].shape\n\n u, w = self.load_state_dict(state_dict, False)\n print(u, w, 'are misaligned params in vision transformer')\n\n\n def forward(self, x: torch.Tensor):\n x = self.conv1(x)\n B, C, H, W = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1)\n x = x.permute(0, 2, 1)\n x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)\n\n pos = self.positional_embedding.to(x.dtype)\n cls_pos = pos[0,:] + self.class_embedding.to(x.dtype)\n spatial_pos = F.interpolate(pos[1:,].reshape(1, self.spatial_size, self.spatial_size, C).permute(0, 3, 1, 2), size=(H, W), mode='bilinear')\n spatial_pos = spatial_pos.reshape(1, C, H*W).permute(0, 2, 1)\n pos = torch.cat([cls_pos.reshape(1, 1, C), spatial_pos], dim=1)\n x = x + pos\n x = self.ln_pre(x)\n x = x.permute(1, 0, 2) # NLD -> LND\n\n features = []\n outs = []\n for i, blk in enumerate(self.transformer.resblocks):\n if self.embed_v and i == len(self.transformer.resblocks) - 1:\n y = blk.ln_1(x)\n y = F.linear(y, blk.attn.in_proj_weight, blk.attn.in_proj_bias)\n y_N, y_L, y_C = y.shape\n y = y.view(y_N, y_L, 3, y_C//3).permute(2, 0, 1, 3).reshape(3*y_N, y_L, y_C//3)\n y = F.linear(y, blk.attn.out_proj.weight, blk.attn.out_proj.bias)\n q, k, v = y.tensor_split(3, dim=0)\n v += x\n v = v + blk.mlp(blk.ln_2(v))\n x = blk(x)\n if len(self.out_indices) > 1:\n if i in self.out_indices:\n xp = x.permute(1, 0, 2)[:, 1:, :].permute(0, 2, 1).reshape(B, -1, H, W)\n features.append(xp.contiguous())\n\n if self.get_embeddings:\n x = x.permute(1, 0, 2)\n x = self.ln_post(x)\n x = x @ self.proj\n\n global_embedding = x[:, 0]\n if self.embed_v:\n v = v.permute(1, 0, 2)\n v = self.ln_post(v)\n v = v @ self.proj\n visual_embedding = v[:, 1:].reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()\n else:\n visual_embedding = x[:, 1:].reshape(B, H, W, -1).permute(0, 3, 1, 2)\n\n if len(self.out_indices) == 1:\n visual_embedding = visual_embedding / visual_embedding.norm(dim=1, keepdim=True)\n features.append(visual_embedding)\n\n outs.append(tuple(features))\n global_embedding = global_embedding / global_embedding.norm(dim=1, keepdim=True)\n outs.append(global_embedding)\n return outs" }, { "identifier": "VPTCLIPVisionTransformer", "path": "third_party/zegclip/models/backbones/clip_vpt_vit.py", "snippet": "class VPTCLIPVisionTransformer(nn.Module):\n def __init__(self, input_resolution=224, patch_size=32, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.0,\n out_indices=[3, 5, 7, 11], 
pretrained=None, get_embeddings=False, embed_v=False,\n num_tokens=20, prompt_dim=512, total_d_layer=11, **kwargs):\n super().__init__()\n self.pretrained = pretrained\n self.input_resolution = input_resolution\n self.output_dim = output_dim\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n scale = width ** -0.5\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\n self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))\n self.spatial_size = input_resolution // patch_size\n self.ln_pre = LayerNorm(width)\n self.get_embeddings = get_embeddings\n self.embed_v = embed_v\n self.num_layers = layers\n\n self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)\n\n self.out_indices = out_indices\n\n if get_embeddings:\n self.ln_post = LayerNorm(width)\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.patch_size = patch_size\n\n ## Setting of visual prompt tuning\n self.num_tokens = num_tokens\n self.prompt_dim = prompt_dim\n self.total_d_layer = total_d_layer\n\n ## Add the prompt parameters # exclude_key=prompt:\n self._init_prompt(patch_size, self.num_tokens, self.prompt_dim, self.total_d_layer)\n\n def _init_prompt(self, patch, num_tokens, prompt_dim, total_d_layer):\n patch_size = []\n patch_size.append(patch)\n patch_size.append(patch)\n val = math.sqrt(6. / float(3 * reduce(mul, patch_size, 1) + prompt_dim)) # noqa\n\n if total_d_layer >= 0:\n self.prompt_embeddings = nn.Parameter(torch.zeros(1, num_tokens, prompt_dim))\n # xavier_uniform initialization\n nn.init.uniform_(self.prompt_embeddings.data, -val, val)\n\n if total_d_layer > 0: # noqa\n self.deep_prompt_embeddings = nn.Parameter(torch.zeros(total_d_layer, num_tokens, prompt_dim))\n # xavier_uniform initialization\n nn.init.uniform_(self.deep_prompt_embeddings.data, -val, val)\n\n self.prompt_proj = nn.Linear(prompt_dim, prompt_dim)\n nn.init.kaiming_normal_(self.prompt_proj.weight, a=0, mode='fan_out')\n self.prompt_norm = LayerNorm(prompt_dim, eps=1e-6)\n self.prompt_dropout = Dropout(0.1)\n\n else: # total_d_layer < 0\n raise ValueError(f'Invalid total_d_layer={self.total_d_layer}.')\n\n\n def init_weights(self, pretrained=None):\n pretrained = pretrained or self.pretrained\n if isinstance(pretrained, str):\n checkpoint = torch.jit.load(pretrained, map_location='cpu').float().state_dict()\n\n state_dict = {}\n\n for k in checkpoint.keys():\n if k.startswith('visual.'):\n new_k = k.replace('visual.', '')\n state_dict[new_k] = checkpoint[k]\n\n if 'positional_embedding' in state_dict.keys():\n if self.positional_embedding.shape != state_dict['positional_embedding'].shape:\n # (1025, 768) (197, 768) upsample the positional_embedding for larger input\n print(f'Resize the pos_embed shape from {state_dict[\"positional_embedding\"].shape} to {self.positional_embedding.shape}')\n cls_pos = state_dict[\"positional_embedding\"][0:1, :]\n if self.patch_size == 16:\n spatial_pos = F.interpolate(state_dict[\"positional_embedding\"][1:,].reshape(1, 14, 14, 768).permute(0, 3, 1, 2), size=(self.spatial_size, self.spatial_size), mode='bilinear')\n else:\n assert ValueError('Patch Size should be 16 or 32')\n spatial_pos = spatial_pos.reshape(768, self.spatial_size*self.spatial_size).permute(1, 0)\n positional_embedding = torch.cat([cls_pos, spatial_pos], dim=0)\n state_dict['positional_embedding'] = positional_embedding\n assert self.positional_embedding.shape 
== state_dict['positional_embedding'].shape\n\n u, w = self.load_state_dict(state_dict, False)\n print(u, w, 'are misaligned params in vision transformer')\n\n\n def forward(self, x: torch.Tensor):\n x = self.conv1(x)\n B, C, H, W = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1)\n x = x.permute(0, 2, 1)\n x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)\n\n pos = self.positional_embedding.to(x.dtype)\n cls_pos = pos[0,:] + self.class_embedding.to(x.dtype)\n spatial_pos = F.interpolate(pos[1:,].reshape(1, self.spatial_size, self.spatial_size, C).permute(0, 3, 1, 2), size=(H, W), mode='bilinear')\n spatial_pos = spatial_pos.reshape(1, C, H*W).permute(0, 2, 1)\n pos = torch.cat([cls_pos.reshape(1, 1, C), spatial_pos], dim=1)\n x = x + pos\n x = self.ln_pre(x)\n\n if self.total_d_layer >=0:\n # concat prompt\n x = torch.cat((\n x[:, :1, :],\n self.prompt_dropout(self.prompt_proj(self.prompt_embeddings).expand(B, -1, -1)),\n x[:, 1:, :]\n ), dim=1)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n\n features = []\n outs = []\n if self.total_d_layer == 0: #shallow\n assert not self.embed_v\n for i, blk in enumerate(self.transformer.resblocks):\n x = blk(x)\n if len(self.out_indices) > 1:\n if i in self.out_indices:\n xp = x.permute(1, 0, 2)[:, 1+self.num_tokens:, :].permute(0, 2, 1).reshape(B, -1, H, W)\n features.append(xp.contiguous())\n elif self.total_d_layer > 0: # deep\n x, features, v = self.forward_deep_prompt(x, features, H, W)\n else:\n raise NotImplementedError(f'Invalid total_d_layer={self.total_d_layer}.')\n\n if self.get_embeddings:\n x = x.permute(1, 0, 2)\n x = self.ln_post(x)\n x = x @ self.proj\n\n global_embedding = x[:, 0]\n if self.embed_v:\n v = v.permute(1, 0, 2)\n v = self.ln_post(v)\n v = v @ self.proj\n visual_embedding = v[:, -(H*W):].reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()\n else:\n visual_embedding = x[:, -(H*W):].reshape(B, H, W, -1).permute(0, 3, 1, 2)\n\n if len(self.out_indices) == 1:\n visual_embedding = visual_embedding / visual_embedding.norm(dim=1, keepdim=True)\n features.append(visual_embedding)\n\n outs.append(tuple(features))\n global_embedding = global_embedding / global_embedding.norm(dim=1, keepdim=True)\n outs.append(global_embedding)\n return outs\n\n\n def forward_deep_prompt(self, embedding_output, features, H, W, out_last=False):\n B = embedding_output.shape[1]\n v = None\n\n for i in range(self.num_layers):\n if i == 0:\n hidden_states = self.transformer.resblocks[i](embedding_output)\n elif i <= self.deep_prompt_embeddings.shape[0]:\n deep_prompt_emb = self.prompt_dropout(self.prompt_proj(self.deep_prompt_embeddings[i-1]).expand(B, -1, -1)).permute(1, 0, 2)\n hidden_states = torch.cat((\n hidden_states[:1, :, :],\n deep_prompt_emb,\n hidden_states[(1+self.num_tokens):, :, :]\n ), dim=0)\n if self.embed_v and i == self.num_layers - 1:\n x = hidden_states\n blk = self.transformer.resblocks[i]\n y = blk.ln_1(x)\n y = F.linear(y, blk.attn.in_proj_weight, blk.attn.in_proj_bias)\n y_N, y_L, y_C = y.shape\n y = y.view(y_N, y_L, 3, y_C//3).permute(2, 0, 1, 3).reshape(3*y_N, y_L, y_C//3)\n y = F.linear(y, blk.attn.out_proj.weight, blk.attn.out_proj.bias)\n q, k, v = y.tensor_split(3, dim=0)\n v += x\n v = v + blk.mlp(blk.ln_2(v))\n\n hidden_states = self.transformer.resblocks[i](hidden_states)\n else:\n assert not self.embed_v\n hidden_states = torch.cat((\n hidden_states[:1, :, :],\n hidden_states[-(H*W):, :, :]\n ), dim=0)\n hidden_states = 
self.transformer.resblocks[i](hidden_states)\n\n if len(self.out_indices) > 1:\n if i in self.out_indices:\n xp = hidden_states.permute(1, 0, 2)[:, -(H*W):, :].permute(0, 2, 1).reshape(B, -1, H, W)\n features.append(xp.contiguous())\n\n if i == (self.num_layers-2): #10\n before_last_feats = self.prompt_norm(hidden_states)\n\n encoded = self.prompt_norm(hidden_states)\n if out_last:\n return before_last_feats\n else:\n return encoded, features, v" }, { "identifier": "CLIPTextEncoder", "path": "third_party/zegclip/models/backbones/text_encoder.py", "snippet": "class CLIPTextEncoder(nn.Module):\n def __init__(self, context_length=77,\n vocab_size=49408,\n transformer_width=512,\n transformer_heads=8,\n transformer_layers=12,\n embed_dim=1024,\n out_dim=256,\n pretrained=None, **kwargs):\n super().__init__()\n\n self.pretrained = pretrained\n\n self.context_length = context_length\n\n self.transformer = Transformer(\n width=transformer_width,\n layers=transformer_layers,\n heads=transformer_heads,\n attn_mask=self.build_attention_mask()\n )\n\n self.vocab_size = vocab_size\n self.token_embedding = nn.Embedding(vocab_size, transformer_width)\n self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))\n self.ln_final = LayerNorm(transformer_width)\n self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))\n \n def init_weights(self, pretrained=None):\n pretrained = pretrained or self.pretrained\n if isinstance(pretrained, str):\n checkpoint = torch.jit.load(pretrained, map_location='cpu').float().state_dict()\n\n state_dict = {}\n\n for k in checkpoint.keys():\n if k.startswith('transformer.'):\n state_dict[k] = checkpoint[k]\n \n if k == 'positional_embedding' or k == 'text_projection' or k.startswith('token_embedding') or k.startswith('ln_final'):\n if k == 'positional_embedding' and checkpoint[k].size(0) > self.context_length:\n checkpoint[k] = checkpoint[k][:self.context_length]\n print('positional_embedding is tuncated from 77 to', self.context_length)\n state_dict[k] = checkpoint[k]\n \n u, w = self.load_state_dict(state_dict, False)\n print(u, w, 'are misaligned params in text encoder')\n\n\n def build_attention_mask(self):\n # lazily create causal attention mask, with full attention between the vision tokens\n # pytorch uses additive attention mask; fill with -inf\n mask = torch.empty(self.context_length, self.context_length)\n mask.fill_(float(\"-inf\"))\n mask.triu_(1) # zero out the lower diagonal\n return mask\n\n def forward(self, text):\n x = self.token_embedding(text)\n x = x + self.positional_embedding \n x = x.permute(1, 0, 2)\n x = self.transformer(x)\n x = x.permute(1, 0, 2)\n x = self.ln_final(x)\n x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection\n return x" }, { "identifier": "DropPath", "path": "third_party/zegclip/models/backbones/utils.py", "snippet": "class DropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n \"\"\"\n def __init__(self, drop_prob=None):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training)\n \n def extra_repr(self) -> str:\n return 'p={}'.format(self.drop_prob)" }, { "identifier": "ATMSingleHeadSeg", "path": "third_party/zegclip/models/decode_heads/atm_head.py", "snippet": "class ATMSingleHeadSeg(BaseDecodeHead):\n def __init__(\n self,\n img_size,\n in_channels,\n seen_idx,\n all_idx,\n 
ignore_seen_pseudo_labels=True,\n embed_dims=768,\n num_layers=3,\n num_heads=8,\n use_stages=1,\n use_proj=True,\n crop_train=False,\n use_rd=True,\n use_aspp=False,\n aspp_relu=True,\n aspp_bn=True,\n aspp_residual=False,\n dilations=(6, 12, 18),\n **kwargs,\n ):\n super(ATMSingleHeadSeg, self).__init__(\n in_channels=in_channels, **kwargs)\n\n self.image_size = img_size\n self.use_stages = use_stages\n self.crop_train = crop_train\n self.use_rd = use_rd\n self.use_aspp = use_aspp\n self.aspp_residual = aspp_residual\n self.seen_idx = seen_idx\n self.all_idx = all_idx\n self.ignore_seen_pseudo_labels = ignore_seen_pseudo_labels\n self.debug = False\n nhead = num_heads\n dim = embed_dims\n input_proj = []\n proj_norm = []\n atm_decoders = []\n\n self.unseen_idx = self.all_idx.copy()\n for i_idx in self.seen_idx:\n self.unseen_idx.remove(i_idx)\n\n if self.use_aspp:\n self.aspp = ASPPModule(dim, dilations, out_channels=dim, bn=aspp_bn, relu=aspp_relu)\n self.add_module(\"aspp\", self.aspp)\n\n for i in range(self.use_stages):\n # FC layer to change ch\n if use_proj:\n proj = nn.Linear(self.in_channels, dim)\n trunc_normal_(proj.weight, std=.02)\n else:\n proj = nn.Identity()\n self.add_module(\"input_proj_{}\".format(i + 1), proj)\n input_proj.append(proj)\n # norm layer\n if use_proj:\n norm = nn.LayerNorm(dim)\n else:\n norm = nn.Identity()\n self.add_module(\"proj_norm_{}\".format(i + 1), norm)\n proj_norm.append(norm)\n # decoder layer\n decoder_layer = TPN_DecoderLayer(d_model=dim, nhead=nhead, dim_feedforward=dim * 4)\n decoder = TPN_Decoder(decoder_layer, num_layers)\n self.add_module(\"decoder_{}\".format(i + 1), decoder)\n atm_decoders.append(decoder)\n\n self.input_proj = input_proj\n self.proj_norm = proj_norm\n self.decoder = atm_decoders\n\n delattr(self, 'conv_seg')\n\n self.q_proj = nn.Linear(dim * 2 if self.use_rd else dim, dim)\n\n def init_weights(self):\n for n, m in self.named_modules():\n if isinstance(m, nn.Linear):\n trunc_normal_init(m, std=.02, bias=0)\n elif isinstance(m, nn.LayerNorm):\n constant_init(m, val=1.0, bias=0.0)\n\n def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg, self_training=False):\n seg_logits = self.forward(inputs)\n\n if self.debug:\n pred = seg_logits['pred_masks'].detach().sigmoid().argmax(dim=1).unsqueeze(1)\n pred_cpu = pred.detach().cpu()\n gt_semantic_seg_cpu = gt_semantic_seg.detach().cpu()\n self.debug_output = [[\n dict(title='Prediction', data=pred_cpu[i], type='label'),\n dict(title='Ground Truth', data=gt_semantic_seg_cpu[i], type='label'),\n ] for i in range(pred.shape[0])]\n\n # print('Self-training: 'self_training)\n if self_training:\n pseudo_semantic_masks = seg_logits['pred_masks'].clone().detach().sigmoid()\n if self.ignore_seen_pseudo_labels:\n pseudo_semantic_masks[:, self.seen_idx, :, :] = -1\n assert -1 not in pseudo_semantic_masks.max(dim=1)[0], \\\n 'All classes are set to -1, which results in argmax defaulting to label 0'\n pseudo_semantic_seg = pseudo_semantic_masks.argmax(dim=1).unsqueeze(1)\n # generate pseudo labels for \"transductive\" setting\n # print('Before pseudo-labelling', gt_semantic_seg[0, 0])\n gt_semantic_seg[gt_semantic_seg==-1] = pseudo_semantic_seg[gt_semantic_seg==-1]\n gt_semantic_seg[gt_semantic_seg==-1] = 255\n # print('After pseudo-labelling', gt_semantic_seg[0, 0])\n losses = self.losses(seg_logits, gt_semantic_seg)\n\n else:\n gt_semantic_seg[gt_semantic_seg==-1] = 255\n losses = self.losses(seg_logits, gt_semantic_seg)\n\n if self.debug and self_training:\n for i in 
range(gt_semantic_seg.shape[0]):\n self.debug_output[i].append(\n dict(title='Pseudo Label', data=gt_semantic_seg[i].detach().cpu(), type='label'))\n\n return losses\n\n def forward_test(self, inputs, img_metas, test_cfg, self_training):\n return self.forward(inputs, self_training)\n\n def forward(self, inputs_both, self_training=None, force_output_pred_masks=False):\n inputs = inputs_both[0][0]\n cls_token = inputs_both[0][1]\n text_token = inputs_both[1]\n \n x = []\n for stage_ in inputs[:self.use_stages]:\n x.append(self.d4_to_d3(stage_) if stage_.dim() > 3 else stage_)\n x.reverse()\n bs = x[0].size()[0]\n\n laterals = []\n attns = []\n maps_size = []\n qs = []\n\n for idx, (x_, proj_, norm_) in enumerate(zip(x, self.input_proj, self.proj_norm)):\n lateral = norm_(proj_(x_))\n if idx == 0:\n laterals.append(lateral)\n else:\n if laterals[idx - 1].size()[1] == lateral.size()[1]:\n laterals.append(lateral + laterals[idx - 1])\n else:\n # nearest interpolate\n l_ = self.d3_to_d4(laterals[idx - 1])\n l_ = F.interpolate(l_, scale_factor=2, mode=\"nearest\")\n l_ = self.d4_to_d3(l_)\n laterals.append(l_ + lateral)\n\n lateral = laterals[-1]\n if self.use_aspp:\n if self.aspp_residual:\n lateral = lateral + 0.01 * self.d4_to_d3(self.aspp(self.d3_to_d4(lateral)))\n else:\n lateral = self.d3_to_d4(lateral)\n lateral = self.aspp(lateral)\n lateral = self.d4_to_d3(lateral)\n\n q = self.q_proj(self.get_qs(text_token, cls_token))\n q = q.transpose(0,1)\n\n for idx, decoder_ in enumerate(self.decoder):\n q_, attn_ = decoder_(q, lateral.transpose(0, 1))\n for q, attn in zip(q_, attn_):\n attn = attn.transpose(-1, -2) \n attn = self.d3_to_d4(attn)\n maps_size.append(attn.size()[-2:])\n qs.append(q.transpose(0, 1))\n attns.append(attn)\n qs = torch.stack(qs, dim=0)\n\n outputs_seg_masks = []\n size = maps_size[-1]\n\n for i_attn, attn in enumerate(attns):\n if attn.shape[1] != self.num_classes:\n cls2con = get_class_to_concept_idxs(self.load_text_embedding)\n attn = aggregate_concept_predictions(attn, cls2con)\n assert attn.shape[1] == self.num_classes\n outputs_seg_masks.append(F.interpolate(attn, size=size, mode='bilinear', align_corners=False))\n\n pred = F.interpolate(outputs_seg_masks[-1],\n size=(self.image_size, self.image_size),\n mode='bilinear', align_corners=False)\n \n out = {\"pred_masks\": pred}\n\n \n if self.training or force_output_pred_masks:\n outputs_seg_masks = torch.stack(outputs_seg_masks, dim=0)# (3, bs, 20, 14, 14)\n else:\n if self_training:\n out[\"pred\"] = self.semantic_inference(out[\"pred_masks\"], self.seen_idx) #(bs, 20, 224, 224)\n else:\n out[\"pred\"] = self.semantic_inference(out[\"pred_masks\"], self.seen_idx, 0.1)\n return out[\"pred\"] \n return out\n\n def semantic_inference(self, mask_pred, seen_idx, weight=0.0):\n mask_pred = mask_pred.sigmoid()\n mask_pred[:,seen_idx] = mask_pred[:,seen_idx] - weight\n return mask_pred\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_seg_masks):\n return [\n {\"pred_masks\": a}\n # for a in zip(outputs_seg_masks[:-1])\n for a in outputs_seg_masks[:-1]\n ]\n\n def d3_to_d4(self, t):\n n, hw, c = t.size()\n if hw % 2 != 0:\n t = t[:, 1:]\n h = w = int(math.sqrt(hw))\n assert h * w == hw\n return t.transpose(1, 2).reshape(n, c, h, w)\n\n def d4_to_d3(self, t):\n return t.flatten(-2).transpose(-1, -2)\n\n def get_qs(self, q, cls):\n # q = [q.cls, q]\n C, dim = q.shape\n bs, _ = cls.shape\n q = q.expand(bs, -1, -1)\n if self.use_rd:\n q1 = torch.einsum(\"bd,bcd->bcd\", cls, q)\n q_ = torch.concat((q1, q), dim=-1)\n else:\n 
q_ = q.to(cls.dtype)\n return q_\n\n\n @force_fp32(apply_to=('seg_logit',))\n def losses(self, seg_logit, seg_label, num_classes=None):\n \"\"\"Compute segmentation loss.\"\"\"\n if isinstance(seg_logit, dict):\n # atm loss\n seg_label = seg_label.squeeze(1)\n\n loss = self.loss_decode(\n seg_logit,\n seg_label,\n ignore_index = self.ignore_index)\n\n loss['acc_seg'] = accuracy(seg_logit[\"pred_masks\"], seg_label, ignore_index=self.ignore_index)\n return loss" } ]
import types

import torch
from functools import reduce
from mmcv.utils import Config
from mmseg.models import ASPPHead, DepthwiseSeparableASPPHead, build_segmentor
from mmseg.ops import resize
from torch.nn import functional as F

from model.backbone.timm_vit import TIMMVisionTransformer
from model.decode_heads.dlv3p_head import DLV3PHead
from model.decode_heads.vlg_head import VLGHead
from model.vlm import VLM
from third_party.maskclip.models.backbones.maskclip_vit import MaskClipVisionTransformer
from third_party.maskclip.models.decode_heads.maskclip2_head import MaskClip2Head
from third_party.maskclip.models.decode_heads.maskclip_head import MaskClipHead
from third_party.unimatch.model.semseg.deeplabv3plus import DeepLabV3Plus
from third_party.zegclip.losses.atm_loss import SegLossPlus
from third_party.zegclip.models.backbones.clip_vit import CLIPVisionTransformer
from third_party.zegclip.models.backbones.clip_vpt_vit import VPTCLIPVisionTransformer
from third_party.zegclip.models.backbones.text_encoder import CLIPTextEncoder
from third_party.zegclip.models.backbones.utils import DropPath
from third_party.zegclip.models.decode_heads.atm_head import ATMSingleHeadSeg
19,331
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def nested_set(dic, key, value):
    keys = key.split('.')
    for key in keys[:-1]:
        dic = dic.setdefault(key, {})
    dic[keys[-1]] = value


def nested_get(dictionary, keys, default=None):
    return reduce(lambda d, key: d.get(key, default) if isinstance(d, dict) else default,
                  keys.split("."), dictionary)


def is_vlm(obj):
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def nested_set(dic, key, value):
    keys = key.split('.')
    for key in keys[:-1]:
        dic = dic.setdefault(key, {})
    dic[keys[-1]] = value


def nested_get(dictionary, keys, default=None):
    return reduce(lambda d, key: d.get(key, default) if isinstance(d, dict) else default, keys.split("."), dictionary)


def is_vlm(obj):
return isinstance(obj, VLM)
3
2023-11-02 14:49:38+00:00
24k
codefuse-ai/Collinear-Constrained-Attention
model/build_model.py
[ { "identifier": "get_model_params_num", "path": "utils/common_utils.py", "snippet": "def get_model_params_num(model):\n \"\"\"\n Get params number of the model\n Args:\n model: model(required)\n Returns:\n the number of parameters of model\n \"\"\"\n num = 0\n for _, param in model.named_parameters():\n num += param.nelement()\n return num" }, { "identifier": "GPTNeoXConfig", "path": "model/gpt_neox/configuration_gpt_neox.py", "snippet": "class GPTNeoXConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`GPTNeoXModel`]. It is used to instantiate an\n GPTNeoX model according to the specified arguments, defining the model architecture. Instantiating a configuration\n with the defaults will yield a similar configuration to that of the GPTNeoX\n [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 50432):\n Vocabulary size of the GPTNeoX model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`GPTNeoXModel`].\n hidden_size (`int`, *optional*, defaults to 6144):\n Dimension of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 44):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 64):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (`int`, *optional*, defaults to 24576):\n Dimension of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` are supported.\n rotary_pct (`float`, *optional*, defaults to 0.25):\n percentage of hidden dimensions to allocate to rotary embeddings\n rotary_emb_base (`int`, *optional*, defaults to 10000)\n base for computing rotary embeddings frequency\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n initializer_range (`float`, *optional*, defaults to 1e-5):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n use_parallel_residual (`bool`, *optional*, defaults to `True`):\n Whether to use a \"parallel\" formulation in each Transformer layer, which can provide a slight training\n speedup at large scales (e.g. 20B).\n rope_scaling (`Dict`, *optional*):\n Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports three scaling\n strategies: linear and dynamic. Their scaling factor must be an float greater than 1. The expected format\n is `{\"type\": strategy name, \"factor\": scaling factor}`. 
When using this flag, don't update\n `max_position_embeddings` to the expected new maximum. See the following thread for more information on how\n these scaling strategies behave:\n https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an\n experimental feature, subject to breaking API changes in future versions.\n Example:\n\n ```python\n >>> from transformers import GPTNeoXConfig, GPTNeoXModel\n\n >>> # Initializing a GPTNeoX gpt-neox-20b style configuration\n >>> configuration = GPTNeoXConfig()\n\n >>> # Initializing a model (with random weights) from the gpt-neox-20b style configuration\n >>> model = GPTNeoXModel(configuration) # doctest: +SKIP\n\n >>> # Accessing the model configuration\n >>> configuration = model.config # doctest: +SKIP\n ```\"\"\"\n model_type = \"gpt_neox\"\n\n def __init__(\n self,\n vocab_size=50432,\n hidden_size=6144,\n num_hidden_layers=44,\n num_attention_heads=64,\n intermediate_size=24576,\n hidden_act=\"gelu\",\n rotary_pct=0.25,\n rotary_emb_base=10000,\n max_position_embeddings=2048,\n initializer_range=0.02,\n layer_norm_eps=1e-5,\n use_cache=True,\n bos_token_id=0,\n eos_token_id=2,\n tie_word_embeddings=False,\n use_parallel_residual=True,\n rope_scaling=None,\n **kwargs\n ):\n super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.rotary_pct = rotary_pct\n self.rotary_emb_base = rotary_emb_base\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.use_cache = use_cache\n self.tie_word_embeddings = tie_word_embeddings\n self.use_parallel_residual = use_parallel_residual\n self.rope_scaling = rope_scaling\n self._rope_scaling_validation()\n\n if self.hidden_size % self.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size is not divisble by the number of attention heads! 
Make sure to update them!\"\n )\n\n # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation\n def _rope_scaling_validation(self):\n \"\"\"\n Validate the `rope_scaling` configuration.\n \"\"\"\n if self.rope_scaling is None:\n return\n\n if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:\n raise ValueError(\n \"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, \"\n f\"got {self.rope_scaling}\"\n )\n rope_scaling_type = self.rope_scaling.get(\"type\", None)\n rope_scaling_factor = self.rope_scaling.get(\"factor\", None)\n if rope_scaling_type is None or rope_scaling_type not in [\"linear\", \"dynamic\"]:\n raise ValueError(\n f\"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}\"\n )\n if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:\n raise ValueError(f\"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}\")" }, { "identifier": "GPTNeoXForCausalLM", "path": "model/gpt_neox/modeling_gpt_neox.py", "snippet": "class GPTNeoXForCausalLM(GPTNeoXPreTrainedModel):\n\n # _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.gpt_neox = GPTNeoXModel(config)\n self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.embed_out\n\n def set_output_embeddings(self, new_embeddings):\n self.embed_out = new_embeddings\n\n @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. 
The two additional tensors are\n only required when the model is used as a decoder in a Sequence to Sequence model.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks that can be used (see\n `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in\n `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are\n ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, GPTNeoXForCausalLM, GPTNeoXConfig\n >>> import torch\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"EleutherAI/gpt-neox-20b\")\n >>> config = GPTNeoXConfig.from_pretrained(\"EleutherAI/gpt-neox-20b\")\n >>> config.is_decoder = True\n >>> model = GPTNeoXForCausalLM.from_pretrained(\"EleutherAI/gpt-neox-20b\", config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.gpt_neox(\n input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n lm_logits = self.embed_out(hidden_states)\n\n lm_loss = None\n if labels is not None:\n # move labels to correct device to enable model parallelism\n labels = labels.to(lm_logits.device)\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shift_logits = lm_logits[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n input_shape = input_ids.shape\n\n # cut decoder_input_ids if past is used\n if past_key_values and past_key_values[0] is not None:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if 
past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"attention_mask\": attention_mask,\n \"past_key_values\": past_key_values,\n \"position_ids\": position_ids,\n }\n )\n\n return model_inputs\n\n def _reorder_cache(self, past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n )\n return reordered_past" }, { "identifier": "GPTNeoXTokenizerFast", "path": "model/gpt_neox/tokenization_gpt_neox_fast.py", "snippet": "class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):\n \"\"\"\n Construct a \"fast\" GPT-NeoX-20B tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level\n Byte-Pair-Encoding.\n\n This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will\n be encoded differently whether it is at the beginning of the sentence (without space) or not:\n\n ```python\n >>> from transformers import GPTNeoXTokenizerFast\n\n >>> tokenizer = GPTNeoXTokenizerFast.from_pretrained(\"gpt2\")\n >>> tokenizer(\"Hello world\")[\"input_ids\"]\n [15496, 995]\n\n >>> tokenizer(\" Hello world\")[\"input_ids\"]\n [18435, 995]\n ```\n\n You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since\n the model was not pretrained this way, it might yield a decrease in performance.\n\n <Tip>\n\n When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.\n\n </Tip>\n\n This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should\n refer to this superclass for more information regarding those methods.\n\n Args:\n vocab_file (`str`):\n Path to the vocabulary file.\n merges_file (`str`):\n Path to the merges file.\n errors (`str`, *optional*, defaults to `\"replace\"`):\n Paradigm to follow when decoding bytes to UTF-8. See\n [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.\n unk_token (`str`, *optional*, defaults to `<|endoftext|>`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n bos_token (`str`, *optional*, defaults to `<|endoftext|>`):\n The beginning of sequence token.\n eos_token (`str`, *optional*, defaults to `<|endoftext|>`):\n The end of sequence token.\n add_prefix_space (`bool`, *optional*, defaults to `False`):\n Whether or not to add an initial space to the input. This allows to treat the leading word just as any\n other word. 
(GPTNeoX tokenizer detect beginning of words by the preceding space).\n trim_offsets (`bool`, *optional*, defaults to `True`):\n Whether or not the post-processing step should trim offsets to avoid including whitespaces.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file=None,\n merges_file=None,\n tokenizer_file=None,\n unk_token=\"<|endoftext|>\",\n bos_token=\"<|endoftext|>\",\n eos_token=\"<|endoftext|>\",\n add_prefix_space=False,\n **kwargs,\n ):\n super().__init__(\n vocab_file,\n merges_file,\n tokenizer_file=tokenizer_file,\n unk_token=unk_token,\n bos_token=bos_token,\n eos_token=eos_token,\n add_prefix_space=add_prefix_space,\n **kwargs,\n )\n\n pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())\n if pre_tok_state.get(\"add_prefix_space\", add_prefix_space) != add_prefix_space:\n pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop(\"type\"))\n pre_tok_state[\"add_prefix_space\"] = add_prefix_space\n self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)\n\n self.add_prefix_space = add_prefix_space\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n files = self._tokenizer.model.save(save_directory, name=filename_prefix)\n return tuple(files)\n\n def _build_conversation_input_ids(self, conversation: \"Conversation\") -> List[int]:\n \"\"\"This corresponds to DialoGPT variants of models.\"\"\"\n input_ids = []\n for is_user, text in conversation.iter_texts():\n input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])\n\n if len(input_ids) > self.model_max_length:\n input_ids = input_ids[-self.model_max_length :]\n return input_ids" }, { "identifier": "LlamaConfig", "path": "model/llama/configuration_llama.py", "snippet": "class LlamaConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA\n model according to the specified arguments, defining the model architecture. Instantiating a configuration with the\n defaults will yield a similar configuration to that of the LLaMA-7B.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 32000):\n Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`LlamaModel`]\n hidden_size (`int`, *optional*, defaults to 4096):\n Dimension of the hidden representations.\n intermediate_size (`int`, *optional*, defaults to 11008):\n Dimension of the MLP representations.\n num_hidden_layers (`int`, *optional*, defaults to 32):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 32):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_key_value_heads (`int`, *optional*):\n This is the number of key_value heads that should be used to implement Grouped Query Attention. If\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\n `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. 
When\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\n by meanpooling all the original heads within that group. For more details checkout [this\n paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to\n `num_attention_heads`.\n pretraining_tp (`int`, *optional*, defaults to `1`):\n Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this\n document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is\n necessary to ensure exact reproducibility of the pretraining results. Please refer to [this\n issue](https://github.com/pytorch/pytorch/issues/76232).\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the decoder.\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n rms_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the rms normalization layers.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n tie_word_embeddings(`bool`, *optional*, defaults to `False`):\n Whether to tie weight embeddings\n rope_scaling (`Dict`, *optional*):\n Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling\n strategies: linear and dynamic. Their scaling factor must be an float greater than 1. The expected format\n is `{\"type\": strategy name, \"factor\": scaling factor}`. When using this flag, don't update\n `max_position_embeddings` to the expected new maximum. See the following thread for more information on how\n these scaling strategies behave:\n https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. 
This is an\n experimental feature, subject to breaking API changes in future versions.\n\n Example:\n\n ```python\n >>> from transformers import LlamaModel, LlamaConfig\n\n >>> # Initializing a LLaMA llama-7b style configuration\n >>> configuration = LlamaConfig()\n\n >>> # Initializing a model from the llama-7b style configuration\n >>> model = LlamaModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n model_type = \"llama\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=32000,\n hidden_size=4096,\n intermediate_size=11008,\n num_hidden_layers=32,\n num_attention_heads=32,\n num_key_value_heads=None,\n hidden_act=\"silu\",\n max_position_embeddings=2048,\n initializer_range=0.02,\n rms_norm_eps=1e-6,\n use_cache=True,\n pad_token_id=None,\n bos_token_id=1,\n eos_token_id=2,\n pretraining_tp=1,\n tie_word_embeddings=False,\n rope_scaling=None,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n\n # for backward compatibility\n if num_key_value_heads is None:\n num_key_value_heads = num_attention_heads\n\n self.num_key_value_heads = num_key_value_heads\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.rms_norm_eps = rms_norm_eps\n self.pretraining_tp = pretraining_tp\n self.use_cache = use_cache\n self.rope_scaling = rope_scaling\n self._rope_scaling_validation()\n\n super().__init__(\n pad_token_id=pad_token_id,\n bos_token_id=bos_token_id,\n eos_token_id=eos_token_id,\n tie_word_embeddings=tie_word_embeddings,\n **kwargs,\n )\n\n def _rope_scaling_validation(self):\n \"\"\"\n Validate the `rope_scaling` configuration.\n \"\"\"\n if self.rope_scaling is None:\n return\n\n if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:\n raise ValueError(\n \"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, \"\n f\"got {self.rope_scaling}\"\n )\n rope_scaling_type = self.rope_scaling.get(\"type\", None)\n rope_scaling_factor = self.rope_scaling.get(\"factor\", None)\n if rope_scaling_type is None or rope_scaling_type not in [\"linear\", \"dynamic\"]:\n raise ValueError(\n f\"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}\"\n )\n if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:\n raise ValueError(f\"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}\")" }, { "identifier": "LlamaForCausalLM", "path": "model/llama/modeling_llama.py", "snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.model = LlamaModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def 
get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)\n logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]\n logits = torch.cat(logits, dim=-1)\n else:\n logits = self.lm_head(hidden_states)\n logits = logits.float()\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n 
logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),\n )\n return reordered_past" }, { "identifier": "LlamaTokenizer", "path": "model/llama/tokenization_llama.py", "snippet": "class LlamaTokenizer(PreTrainedTokenizer):\n \"\"\"\n Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is\n no padding token in the original model.\n\n Args:\n vocab_file (`str`):\n Path to the vocabulary file.\n legacy (`bool`, *optional*, defaults to `True`):\n Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622\n which includes fixes to properly handle tokens that appear after special tokens. 
A simple example:\n\n - `legacy=True`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=True)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\")\n [8774, 32099, 3, 5, 1]\n ```\n - `legacy=False`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=False)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\") # the extra space `[3]` is no longer here\n [8774, 32099, 5, 1]\n ```\n Checkout the pull request and the issue [here](https://github.com/huggingface/transformers/pull/24565) for\n more details.\n\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n pad_token=None,\n sp_model_kwargs: Optional[Dict[str, Any]] = None,\n add_bos_token=True,\n add_eos_token=False,\n clean_up_tokenization_spaces=False,\n legacy=None,\n **kwargs,\n ):\n self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs\n bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token\n eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token\n unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token\n pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token\n super().__init__(\n bos_token=bos_token,\n eos_token=eos_token,\n unk_token=unk_token,\n pad_token=pad_token,\n add_bos_token=add_bos_token,\n add_eos_token=add_eos_token,\n sp_model_kwargs=self.sp_model_kwargs,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n legacy=legacy,\n **kwargs,\n )\n if legacy is None:\n logger.warning_once(\n f\"You are using the default legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to\"\n \" read the related pull request available at https://github.com/huggingface/transformers/pull/24565, and set the legacy attribute accordingly.\"\n )\n legacy = True\n\n self.legacy = legacy\n self.vocab_file = vocab_file\n self.add_bos_token = add_bos_token\n self.add_eos_token = add_eos_token\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.Load(vocab_file)\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"sp_model\"] = None\n state[\"sp_model_proto\"] = self.sp_model.serialized_model_proto()\n return state\n\n def __setstate__(self, d):\n self.__dict__ = d\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.LoadFromSerializedProto(self.sp_model_proto)\n\n @property\n def vocab_size(self):\n \"\"\"Returns vocab size\"\"\"\n return self.sp_model.get_piece_size()\n\n def get_vocab(self):\n \"\"\"Returns vocab as a dict\"\"\"\n vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}\n vocab.update(self.added_tokens_encoder)\n return vocab\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize\n def tokenize(self, text: \"TextInput\", **kwargs) -> List[str]:\n # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at\n # the beginning of the text\n if not self.legacy:\n text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, \" \")\n return super().tokenize(text, **kwargs)\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize\n def _tokenize(self, text, **kwargs):\n \"\"\"\n Returns a tokenized string.\n\n Since the sentencepiece internal model always adds a SPIECE_UNDERLINE, at the beginning of the provided text,\n we need to remove it by hand when the current text is a subsequence. This happens whenever the `self.tokenize`\n function is called with specials tokens: the input is split on the special tokens, and each subsequence is\n passed to `_tokenize`. 
Thus if a subsequence did not start with a `\" \"` or SPIECE_UNDERLINE, we have to remove\n the extra `SPIECE_UNDERLINE` prepended.\n \"\"\"\n if not self.legacy:\n is_first = text.startswith(SPIECE_UNDERLINE)\n if is_first:\n text = text[1:]\n\n tokens = self.sp_model.encode(text, out_type=str)\n\n if not self.legacy and not is_first and not text.startswith(\" \") and tokens[0].startswith(SPIECE_UNDERLINE):\n tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]\n return tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n return self.sp_model.piece_to_id(token)\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n token = self.sp_model.IdToPiece(index)\n return token\n\n def convert_tokens_to_string(self, tokens):\n \"\"\"Converts a sequence of tokens (string) in a single string.\"\"\"\n current_sub_tokens = []\n out_string = \"\"\n prev_is_special = False\n for i, token in enumerate(tokens):\n # make sure that special tokens are not decoded using sentencepiece model\n if token in self.all_special_tokens:\n if not prev_is_special and i != 0:\n out_string += \" \"\n out_string += self.sp_model.decode(current_sub_tokens) + token\n prev_is_special = True\n current_sub_tokens = []\n else:\n current_sub_tokens.append(token)\n prev_is_special = False\n out_string += self.sp_model.decode(current_sub_tokens)\n return out_string\n\n def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:\n \"\"\"\n Save the vocabulary and special tokens file to a directory.\n\n Args:\n save_directory (`str`):\n The directory in which to save the vocabulary.\n\n Returns:\n `Tuple(str)`: Paths to the files saved.\n \"\"\"\n if not os.path.isdir(save_directory):\n logger.error(f\"Vocabulary path ({save_directory}) should be a directory\")\n return\n out_vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n elif not os.path.isfile(self.vocab_file):\n with open(out_vocab_file, \"wb\") as fi:\n content_spiece_model = self.sp_model.serialized_model_proto()\n fi.write(content_spiece_model)\n\n return (out_vocab_file,)\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = bos_token_id + token_ids_0 + eos_token_id\n\n if token_ids_1 is not None:\n output = output + bos_token_id + token_ids_1 + eos_token_id\n\n return output\n\n def get_special_tokens_mask(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False\n ) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(\n token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True\n )\n\n bos_token_id = [1] if self.add_bos_token else []\n eos_token_id = [1] if self.add_eos_token else []\n\n if token_ids_1 is None:\n return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id\n return (\n bos_token_id\n + ([0] * len(token_ids_0))\n + eos_token_id\n + bos_token_id\n + ([0] * len(token_ids_1))\n + eos_token_id\n )\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT\n sequence pair mask has the following format:\n\n ```\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n ```\n\n if token_ids_1 is None, only returns the first portion of the mask (0s).\n\n Args:\n token_ids_0 (`List[int]`):\n List of ids.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).\n \"\"\"\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)\n\n if token_ids_1 is not None:\n output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)\n\n return output\n\n def _build_conversation_input_ids(self, conversation: \"Conversation\") -> List[int]:\n r\"\"\"Builds the input ids for a conversation.\n This is the format used in the provided examples. System prompts should be manually added at the beginning of\n the conversation. If no system prompt is given, the `DEFAULT_SYSTEM_PROMPT` will be used.\n ```\n <bos>[INST] B_SYS SytemPrompt E_SYS Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST]\n ```\n\n If you want to use your own system prompt, make sure to use both `B_SYS` and `E_SYS` use the following:\n ```python\n >>> from transformers import Conversation\n\n >>> Conversation(\n ... \"<<SYS>>\\n Only answer with emojis, and charades\\n<</SYS>>\\n\\nHow can I build a house in 10 septs?\"\n ... 
) # doctest: +IGNORE_RESULT\n ```\n Args:\n conversation (`Conversation`):\n Conversation to build input ids for.\n Returns:\n `List[int]`:\n Input ids for the conversation.\n \"\"\"\n if len(conversation.past_user_inputs) > 0:\n if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]:\n conversation.past_user_inputs[0] = (\n B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0]\n )\n elif conversation.new_user_input:\n if not conversation.new_user_input.startswith(B_SYS) or E_SYS not in conversation.new_user_input:\n conversation.new_user_input = B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.new_user_input\n else:\n raise ValueError(\"Last message must be from user\")\n\n dialogue = list(conversation.iter_texts())\n if not all([is_user for is_user, msg in dialogue[::2]]) or not all(\n [not is_user for is_user, msg in dialogue[1::2]]\n ):\n raise ValueError(\n \"The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)\"\n )\n\n dialog_tokens: List[int] = []\n dialog_tokens += sum(\n [\n [self.bos_token_id]\n + self.encode(\n f\"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} \", add_special_tokens=False\n )\n + [self.eos_token_id]\n for prompt, answer in zip(dialogue[::2], dialogue[1::2])\n ],\n [],\n )\n dialog_tokens += [self.bos_token_id] + self.encode(\n f\"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}\", add_special_tokens=False\n )\n return dialog_tokens" }, { "identifier": "LlamaTokenizerFast", "path": "model/llama/tokenization_llama_fast.py", "snippet": "class LlamaTokenizerFast(PreTrainedTokenizerFast):\n \"\"\"\n Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding.\n\n This uses notably ByteFallback and no normalization.\n\n ```\n from transformers import LlamaTokenizerFast\n\n tokenizer = LlamaTokenizerFast.from_pretrained(\"hf-internal-testing/llama-tokenizer\")\n tokenizer.encode(\"Hello this is a test\")\n >>> [1, 15043, 445, 338, 263, 1243]\n ```\n\n If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or\n call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the\n values of the first token and final token of an encoded sequence will not be correct). For more details, checkout\n [post-processors] (https://huggingface.co/docs/tokenizers/api/post-processors) documentation.\n\n\n This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should\n refer to this superclass for more information regarding those methods.\n\n Args:\n vocab_file (`str`):\n [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that\n contains the vocabulary necessary to instantiate a tokenizer.\n tokenizer_file (`str`):\n [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that\n contains everything needed to load the tokenizer.\n\n clean_up_tokenization_spaces (`str`, *optional*, defaults to `False`):\n Wether to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra\n spaces.\n\n bos_token (`str`, *optional*, defaults to `\"<s>\"`):\n The beginning of sequence token that was used during pretraining. 
Can be used a sequence classifier token.\n\n eos_token (`str`, *optional*, defaults to `\"</s>\"`):\n The end of sequence token.\n\n unk_token (`str`, *optional*, defaults to `\"<unk>\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n slow_tokenizer_class = LlamaTokenizer\n padding_side = \"left\"\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file=None,\n tokenizer_file=None,\n clean_up_tokenization_spaces=False,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n add_bos_token=True,\n add_eos_token=False,\n **kwargs,\n ):\n super().__init__(\n vocab_file=vocab_file,\n tokenizer_file=tokenizer_file,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n unk_token=unk_token,\n bos_token=bos_token,\n eos_token=eos_token,\n **kwargs,\n )\n self._add_bos_token = add_bos_token\n self._add_eos_token = add_eos_token\n self.update_post_processor()\n\n self.vocab_file = vocab_file\n self.can_save_slow_tokenizer = False if not self.vocab_file else True\n\n def update_post_processor(self):\n \"\"\"\n Updates the underlying post processor with the current `bos_token` and `eos_token`.\n \"\"\"\n bos = self.bos_token\n bos_token_id = self.bos_token_id\n\n eos = self.eos_token\n eos_token_id = self.eos_token_id\n\n single = f\"{(bos+':0 ') * self.add_bos_token}$A:0{(' '+eos+':0') * self.add_eos_token}\"\n pair = f\"{single}{(' '+bos+':1') * self.add_bos_token} $B:1{(' '+eos+':1') * self.add_eos_token}\"\n\n special_tokens = []\n if self.add_bos_token:\n special_tokens.append((bos, bos_token_id))\n if self.add_eos_token:\n special_tokens.append((eos, eos_token_id))\n self._tokenizer.post_processor = processors.TemplateProcessing(\n single=single, pair=pair, special_tokens=special_tokens\n )\n\n @property\n def add_eos_token(self):\n return self._add_eos_token\n\n @property\n def add_bos_token(self):\n return self._add_bos_token\n\n @add_eos_token.setter\n def add_eos_token(self, value):\n self._add_eos_token = value\n self.update_post_processor()\n\n @add_bos_token.setter\n def add_bos_token(self, value):\n self._add_bos_token = value\n self.update_post_processor()\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n if not self.can_save_slow_tokenizer:\n raise ValueError(\n \"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow \"\n \"tokenizer.\"\n )\n\n if not os.path.isdir(save_directory):\n logger.error(f\"Vocabulary path ({save_directory}) should be a directory\")\n return\n out_vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n\n return (out_vocab_file,)\n\n def _build_conversation_input_ids(self, conversation: \"Conversation\"):\n \"\"\"Builds the input ids for a conversation.\n This is the format used in the provided examples. System prompts should be manually added at the beginning of\n the conversation. 
If no system prompt is given, the `DEFAULT_SYSTEM_PROMPT` will be used.\n ```\n <bos>[INST] B_SYS SytemPrompt E_SYS Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST]\n ```\n\n If you want to use your own system prompt, make sure to use both `B_SYS` and `E_SYS` use the following:\n ```python\n >>> from transformers import Conversation\n\n >>> Conversation(\n ... \"<<SYS>>\\n Only answer with emojis, and charades\\n<</SYS>>\\n\\nHow can I build a house in 10 septs?\"\n ... )\n ```\n Args:\n conversation (`Conversation`):\n Conversation to build input ids for.\n Returns:\n `List[int]`:\n Input ids for the conversation.\n \"\"\"\n if len(conversation.past_user_inputs) > 0:\n if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]:\n conversation.past_user_inputs[0] = (\n B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0]\n )\n elif conversation.new_user_input:\n if not conversation.new_user_input.startswith(B_SYS) or E_SYS not in conversation.new_user_input:\n conversation.new_user_input = B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.new_user_input\n else:\n raise ValueError(\"Last message must be from user\")\n\n dialogue = list(conversation.iter_texts())\n if not all([is_user for is_user, msg in dialogue[::2]]) or not all(\n [not is_user for is_user, msg in dialogue[1::2]]\n ):\n raise ValueError(\n \"The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)\"\n )\n\n dialog_tokens = []\n dialog_tokens += sum(\n [\n [self.bos_token_id]\n + self.encode(\n f\"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} \", add_special_tokens=False\n )\n + [self.eos_token_id]\n for prompt, answer in zip(dialogue[::2], dialogue[1::2])\n ],\n [],\n )\n dialog_tokens += [self.bos_token_id] + self.encode(\n f\"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}\", add_special_tokens=False\n )\n return dialog_tokens" }, { "identifier": "print_rank_0", "path": "utils/common_utils.py", "snippet": "def print_rank_0(*message):\n \"\"\"If distributed is initialized print only on rank 0.\"\"\"\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n print(*message, flush=True)\n else:\n print(*message, flush=True)" }, { "identifier": "is_old_version", "path": "utils/common_utils.py", "snippet": "def is_old_version(path):\n new_vocab_files = ['merge.model']\n new_vocab_file_exists = []\n for filename in new_vocab_files:\n if not os.path.exists(os.path.join(path, filename)):\n new_vocab_file_exists.append(False)\n else:\n new_vocab_file_exists.append(True)\n if all(new_vocab_file_exists):\n return False\n if any(new_vocab_file_exists):\n return 'new_version_file_absent'\n else:\n return True" }, { "identifier": "build_tokenizer", "path": "tokenizer/tokenizer.py", "snippet": "def build_tokenizer(args):\n \"\"\"Initialize tokenizer.\"\"\"\n print_rank_0(\"> building {} tokenizer ...\".format(args.tokenizer_type))\n # if args.rank == 0:\n # print(\"> building {} tokenizer ...\".format(args.tokenizer_type), flush=True)\n\n # Select and instantiate the tokenizer.\n if args.tokenizer_type.lower() == \"GPT2BPETokenizer\".lower():\n assert args.vocab_file is not None\n assert args.merge_file is not None\n tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file)\n elif args.tokenizer_type.lower() == \"SPMTokenizer\".lower():\n assert args.vocab_file is not None\n tokenizer = SentencePieceTokenizer(args.vocab_file)\n 
elif args.tokenizer_type.lower() == \"HFTokenizer\".lower():\n assert args.vocab_file is not None\n tokenizer = HFTokenizer(args.vocab_file)\n elif args.tokenizer_type.lower() == \"HFGPT2Tokenizer\".lower():\n if args.vocab_file is None:\n print(\n \"WARNING: No vocab file found, loading Huggingface's pretrained GPT2Tokenizer\"\n )\n tokenizer = HFGPT2Tokenizer(args.vocab_file)\n elif args.tokenizer_type.lower() == \"CharLevelTokenizer\".lower():\n tokenizer = CharLevelTokenizer(vocab_size=512)\n elif args.tokenizer_type.lower() == \"TiktokenTokenizer\".lower():\n assert args.vocab_file is not None\n tokenizer = TiktokenTokenizer(args.vocab_file)\n elif args.tokenizer_type.lower() == \"GLMTokenizer\".lower():\n if is_old_version(args.pretrained_model_path):\n print('is an old version')\n from model.glm.tokenization_glm_deprecated import GLMChineseTokenizer\n args.glm_mask = '[sMASK]'\n old_version_tokenizer = True\n tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)\n else:\n print('is not an old version')\n old_version_tokenizer = False\n tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)\n else:\n raise NotImplementedError(\n \"{} tokenizer is not \" \"implemented.\".format(args.tokenizer_type)\n )\n\n # Add vocab size.\n args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args)\n\n return tokenizer" }, { "identifier": "HFTokenizer", "path": "tokenizer/tokenizer.py", "snippet": "class HFTokenizer(AbstractTokenizer):\n \"\"\"Designed to Integrate HF's Tokenizer library.\"\"\"\n\n def __init__(self, vocab_file):\n name = \"HFTokenizer\"\n super().__init__(name)\n\n self.tokenizer = Tokenizer.from_file(vocab_file)\n # self.eod_id = self.tokenizer.token_to_id(\"<|endoftext|>\")\n self.eod_id = self.tokenizer.token_to_id(\"<|end|>\")\n # self.pad_id = self.tokenizer.token_to_id(\"<|padding|>\")\n \n # 新词表没有<|padding|>, 用<|extratoken_1|>代替,和tokenization一致\n # self.pad_id = self.tokenizer.token_to_id(\"<|extratoken_1|>\")\n self.pad_id = self.tokenizer.token_to_id(\"<|pad|>\")\n\n @property\n def vocab_size(self):\n return self.tokenizer.get_vocab_size()\n\n @property\n def vocab(self):\n return self.tokenizer.get_vocab()\n\n @property\n def inv_vocab(self):\n return self.tokenizer.decoder\n\n def tokenize(self, text: str):\n return self.tokenizer.encode(text).ids\n\n def tokenize_batch(self, text_batch: Union[List[str], str]):\n return self.tokenizer.encode_batch(text_batch)\n\n def detokenize(self, token_ids):\n return self.tokenizer.decode(token_ids)\n\n @property\n def eod(self):\n return self.eod_id" }, { "identifier": "prepare_model_for_kbit_training", "path": "model/peft/utils/others.py", "snippet": "def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True):\n r\"\"\"\n This method wraps the entire protocol for preparing a model before running a training. 
This includes:\n 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm\n head to fp32\n\n Args:\n model, (`transformers.PreTrainedModel`):\n The loaded model from `transformers`\n \"\"\"\n loaded_in_kbit = getattr(model, \"is_loaded_in_8bit\", False) or getattr(model, \"is_loaded_in_4bit\", False)\n\n for name, param in model.named_parameters():\n # freeze base model's layers\n param.requires_grad = False\n \n # cast all non INT8 parameters to fp32\n for param in model.parameters():\n if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16):\n param.data = param.data.to(torch.float32)\n \n if loaded_in_kbit and use_gradient_checkpointing:\n # For backward compatibility\n if hasattr(model, \"enable_input_require_grads\"):\n model.enable_input_require_grads()\n else:\n \n def make_inputs_require_grad(module, input, output):\n output.requires_grad_(True)\n\n model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)\n\n # enable gradient checkpointing for memory efficiency\n model.gradient_checkpointing_enable()\n\n return model" }, { "identifier": "AdaLoraConfig", "path": "model/peft/tuner/adalora.py", "snippet": "class AdaLoraConfig(LoraConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`~peft.AdaLora`].\n\n Args:\n target_r (`int`): The target average rank of incremental matrix.\n init_r (`int`): The initial rank for each incremental matrix.\n tinit (`int`): The steps of initial fine-tuning warmup.\n tfinal (`int`): The step of final fine-tuning.\n deltaT (`int`): The time internval between two budget allocations.\n beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing.\n beta2 (`float`): The hyperparameter of EMA for undertainty quantification.\n orth_reg_weight (`float`): The coefficient of orthogonal regularization.\n total_step (`int`): The total training steps that should be specified before training.\n rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator.\n \"\"\"\n\n target_r: int = field(default=8, metadata={\"help\": \"Target Lora matrix dimension.\"})\n init_r: int = field(default=12, metadata={\"help\": \"Intial Lora matrix dimension.\"})\n tinit: int = field(default=0, metadata={\"help\": \"The steps of initial warmup.\"})\n tfinal: int = field(default=0, metadata={\"help\": \"The steps of final warmup.\"})\n deltaT: int = field(default=1, metadata={\"help\": \"Step interval of rank allocation.\"})\n beta1: float = field(default=0.85, metadata={\"help\": \"Hyperparameter of EMA.\"})\n beta2: float = field(default=0.85, metadata={\"help\": \"Hyperparameter of EMA.\"})\n orth_reg_weight: float = field(default=0.5, metadata={\"help\": \"The orthogonal regularization coefficient.\"})\n total_step: Optional[int] = field(default=None, metadata={\"help\": \"The total training steps.\"})\n rank_pattern: Optional[dict] = field(default=None, metadata={\"help\": \"The saved rank pattern.\"})\n init_lora_weights: bool = field(\n default=True,\n metadata={\"help\": \"Whether to initialize the weights of the Lora layers.\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.ADALORA" } ]
import os import torch import sys import peft import model.peft.modeling_peft # noqa import bitsandbytes as bnb # noqa import accelerate # noqa from utils.common_utils import get_model_params_num from transformers import ( # noqa: E402 CONFIG_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast ) from .gpt_neox.configuration_gpt_neox import GPTNeoXConfig from .gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM from .gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast from .llama.configuration_llama import LlamaConfig from .llama.modeling_llama import LlamaForCausalLM from .llama.tokenization_llama import LlamaTokenizer from .llama.tokenization_llama_fast import LlamaTokenizerFast from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, StateDictType, ) from utils.common_utils import print_rank_0, is_old_version from tokenizer import build_tokenizer from tokenizer.tokenizer import HFTokenizer from peft.tuners.lora import LoraLayer from model.peft.utils import prepare_model_for_kbit_training from peft import ( # noqa LoraConfig, PrefixTuningConfig, PromptEncoderConfig, PromptEncoderReparameterizationType, PromptTuningConfig, PromptTuningInit, TaskType, get_peft_model ) from model.peft.tuner import AdaLoraConfig from transformers import BitsAndBytesConfig from packaging import version from .glm.tokenization_glm_deprecated import GLMChineseTokenizer
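The setup_model code below wires dataset-specific special tokens onto a fast tokenizer by plain attribute assignment plus convert_tokens_to_ids. A stripped-down sketch of that pattern (the tokenizer file path is a placeholder; the token strings are copied from the snippet and assume the vocab contains them):

from transformers import PreTrainedTokenizerFast

def load_sst_tokenizer(vocab_file: str):
    tok = PreTrainedTokenizerFast(tokenizer_file=vocab_file)
    # attach end-of-document / padding aliases and resolve their ids
    tok.eod_token = "<|endoftext|>"
    tok.pad_token = "<|pad|>"
    tok.eod_id = tok.convert_tokens_to_ids(tok.eod_token)
    tok.pad_id = tok.convert_tokens_to_ids(tok.pad_token)
    return tok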
18455
print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.eos_token} id: {tokenizer.eos_id}') print_rank_0(f'tokenizer {tokenizer.bos_token} id: {tokenizer.bos_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') print_rank_0(f'tokenizer {tokenizer.unk_token} id: {tokenizer.unk_id}') elif args.model_type == 'glm': if is_old_version(args.pretrained_model_path): tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path) else: tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path) elif args.train_mode == 'sst': # tokenizer = build_tokenizer(args) tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_path." ) if args.model_type == 'gpt_neox': auto_config = GPTNeoXConfig auto_model_class = GPTNeoXForCausalLM elif args.model_type == 'llama': auto_config = LlamaConfig auto_model_class = LlamaForCausalLM elif args.model_type == 'glm': auto_config = GLMConfig auto_model_class = GLMForConditionalGeneration # else: # auto_config = AutoConfig # auto_model_class = AutoModelForCausalLM # with init_empty_weights_with_disk_offload(ignore_tie_weights=False): if args.pretrained_model_path: logger.info("Training model from checkpoint") config = auto_config.from_pretrained(args.pretrained_model_path) if args.peft_type != "qlora": # config = auto_config.from_pretrained(args.pretrained_model_path) # model = auto_model_class.from_pretrained(args.pretrained_model_path, trust_remote_code=True, device_map='auto').cuda() model = auto_model_class.from_pretrained(args.pretrained_model_path, trust_remote_code=True).cuda() else: if BitsAndBytesConfig is None: raise ImportError( "To use qlora, please upgrade transformers to 4.30.1 by `pip install -U transformers==4.30.1`" ) if bnb is None: raise ImportError("To use qlora, please install bitsandbytes by `pip install -U bitsandbytes==0.39.0`") try: except ImportError: raise ImportError("To use qlora, please install accelerate by `pip install -U accelerate==0.20.3`") peft_version = version.parse(peft.__version__) if peft_version < version.parse("0.4.0"): raise RuntimeError(f"Qlora needs peft>=0.4.0 but current peft version is {peft_version}") if args.bits not in [4, 8]: raise ValueError(f"Qlora only support 4 bits or 8 bits but got {args.bits} bits.") if args.bf16: torch_dtype = torch.bfloat16 else: torch_dtype = torch.float32 if args.fp16: compute_dtype = torch.float16 elif args.bf16: compute_dtype = torch.bfloat16 else: compute_dtype = torch.float32 model = auto_model_class.from_pretrained( # noqa args.pretrained_model_path, trust_remote_code=True, load_in_4bit=args.bits == 4, load_in_8bit=args.bits == 8, torch_dtype=torch_dtype, quantization_config=BitsAndBytesConfig( load_in_4bit=args.bits == 4, load_in_8bit=args.bits == 8, llm_int8_threshold=6.0, llm_int8_has_fp16_weight=False, 
bnb_4bit_compute_dtype=compute_dtype, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", ) ) else: logger.info("Training model from scratch") if args.model_type == 'gpt_neox': config = GPTNeoXConfig.from_json_file(args.config_path + '/config.json') # model = AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code) model = GPTNeoXForCausalLM._from_config(config) elif args.model_type == 'llama': config = LlamaConfig.from_json_file(args.config_path + '/config.json') # llama use xformers if args.use_xformers: config.use_xformers = True model = LlamaForCausalLM._from_config(config) elif args.model_type == 'glm': config = GLMConfig.from_json_file(args.config_path + '/config.json') model = GLMForConditionalGeneration._from_config(config) else: config = AutoConfig.from_json_file(args.config_path + '/config.json') model = AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. if args.model_type not in ['glm']: embedding_size = model.get_input_embeddings().weight.shape[0] print_rank_0('embedding size: ' + str(embedding_size)) print_rank_0('vocab size: ' + str(tokenizer.vocab_size)) if tokenizer.vocab_size > embedding_size: model.resize_token_embeddings(tokenizer.vocab_size) print_rank_0('resize embedding size: ' + str(model.get_input_embeddings().weight.shape[0])) print_rank_0(config)
# coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. sys.path.append("..") # from .glm.modeling_glm import GLMForConditionalGeneration # from .glm.configuration_glm import GLMConfig # from .glm.tokenization_glm import GLMTokenizer try: except ImportError: BitsAndBytesConfig = None try: except ImportError: bnb = None def find_all_linear_names(args, model): cls = bnb.nn.Linear4bit if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear) lora_module_names = set() for name, module in model.named_modules(): if isinstance(module, cls): names = name.split('.') lora_module_names.add(names[0] if len(names) == 1 else names[-1]) if 'lm_head' in lora_module_names: # needed for 16-bit lora_module_names.remove('lm_head') return list(lora_module_names) def setup_model(args, logger, use_cache=False): # Load pretrained model and tokenizer if args.pretrained_model_path: # TODO: 实现from pretrained读tokenizer if args.model_type == 'gpt_neox': # if args.tokenizer_type: # tokenizer = build_tokenizer(args) # tokenizer.eod_token = "<|endoftext|>" # tokenizer.pad_token = "<|pad|>" # # tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset # # tokenizer.eop_token = "<|endoftext|>" # tokenizer.eod_id = tokenizer.tokenize(tokenizer.eod_token)[0] # tokenizer.pad_id = tokenizer.tokenize(tokenizer.pad_token)[0] # else: tokenizer = GPTNeoXTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') elif args.model_type == 'llama': tokenizer = LlamaTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = AutoTokenizer.from_pretrained( # args.pretrained_model_path, # trust_remote_code=True, # ) tokenizer.eod_token = "</s>" tokenizer.eos_token = "</s>" tokenizer.bos_token = "<s>" tokenizer.pad_token = "[PAD]" tokenizer.unk_token = "<unk>" tokenizer.sop_token = "</s>" # 适配multi task dataset tokenizer.eop_token = "</s>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.eos_id = tokenizer.convert_tokens_to_ids(tokenizer.eos_token) tokenizer.bos_id = tokenizer.convert_tokens_to_ids(tokenizer.bos_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) tokenizer.unk_id = tokenizer.convert_tokens_to_ids(tokenizer.unk_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.eos_token} id: {tokenizer.eos_id}') print_rank_0(f'tokenizer {tokenizer.bos_token} id: {tokenizer.bos_id}') print_rank_0(f'tokenizer 
{tokenizer.pad_token} id: {tokenizer.pad_id}') print_rank_0(f'tokenizer {tokenizer.unk_token} id: {tokenizer.unk_id}') elif args.model_type == 'glm': if is_old_version(args.pretrained_model_path): tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path) else: tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path) elif args.train_mode == 'sst': # tokenizer = build_tokenizer(args) tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_path." ) if args.model_type == 'gpt_neox': auto_config = GPTNeoXConfig auto_model_class = GPTNeoXForCausalLM elif args.model_type == 'llama': auto_config = LlamaConfig auto_model_class = LlamaForCausalLM elif args.model_type == 'glm': auto_config = GLMConfig auto_model_class = GLMForConditionalGeneration # else: # auto_config = AutoConfig # auto_model_class = AutoModelForCausalLM # with init_empty_weights_with_disk_offload(ignore_tie_weights=False): if args.pretrained_model_path: logger.info("Training model from checkpoint") config = auto_config.from_pretrained(args.pretrained_model_path) if args.peft_type != "qlora": # config = auto_config.from_pretrained(args.pretrained_model_path) # model = auto_model_class.from_pretrained(args.pretrained_model_path, trust_remote_code=True, device_map='auto').cuda() model = auto_model_class.from_pretrained(args.pretrained_model_path, trust_remote_code=True).cuda() else: if BitsAndBytesConfig is None: raise ImportError( "To use qlora, please upgrade transformers to 4.30.1 by `pip install -U transformers==4.30.1`" ) if bnb is None: raise ImportError("To use qlora, please install bitsandbytes by `pip install -U bitsandbytes==0.39.0`") try: except ImportError: raise ImportError("To use qlora, please install accelerate by `pip install -U accelerate==0.20.3`") peft_version = version.parse(peft.__version__) if peft_version < version.parse("0.4.0"): raise RuntimeError(f"Qlora needs peft>=0.4.0 but current peft version is {peft_version}") if args.bits not in [4, 8]: raise ValueError(f"Qlora only support 4 bits or 8 bits but got {args.bits} bits.") if args.bf16: torch_dtype = torch.bfloat16 else: torch_dtype = torch.float32 if args.fp16: compute_dtype = torch.float16 elif args.bf16: compute_dtype = torch.bfloat16 else: compute_dtype = torch.float32 model = auto_model_class.from_pretrained( # noqa args.pretrained_model_path, trust_remote_code=True, load_in_4bit=args.bits == 4, load_in_8bit=args.bits == 8, torch_dtype=torch_dtype, quantization_config=BitsAndBytesConfig( load_in_4bit=args.bits == 4, load_in_8bit=args.bits == 8, llm_int8_threshold=6.0, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=compute_dtype, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", ) ) else: logger.info("Training model from scratch") if args.model_type == 'gpt_neox': config = GPTNeoXConfig.from_json_file(args.config_path + '/config.json') # model = 
AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code) model = GPTNeoXForCausalLM._from_config(config) elif args.model_type == 'llama': config = LlamaConfig.from_json_file(args.config_path + '/config.json') # llama use xformers if args.use_xformers: config.use_xformers = True model = LlamaForCausalLM._from_config(config) elif args.model_type == 'glm': config = GLMConfig.from_json_file(args.config_path + '/config.json') model = GLMForConditionalGeneration._from_config(config) else: config = AutoConfig.from_json_file(args.config_path + '/config.json') model = AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. if args.model_type not in ['glm']: embedding_size = model.get_input_embeddings().weight.shape[0] print_rank_0('embedding size: ' + str(embedding_size)) print_rank_0('vocab size: ' + str(tokenizer.vocab_size)) if tokenizer.vocab_size > embedding_size: model.resize_token_embeddings(tokenizer.vocab_size) print_rank_0('resize embedding size: ' + str(model.get_input_embeddings().weight.shape[0])) print_rank_0(config)
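For reference, the qlora branch of setup_model above reduces to a standard 4-bit NF4 load through transformers' BitsAndBytesConfig. A minimal sketch with the repo-specific branching and version checks stripped out (the model path and dtype choice are placeholders):

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

def load_4bit(pretrained_path: str, bf16: bool = True):
    compute_dtype = torch.bfloat16 if bf16 else torch.float16
    quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=compute_dtype,  # dtype used inside matmuls
        bnb_4bit_use_double_quant=True,        # also quantize the quantization constants
        bnb_4bit_quant_type="nf4",             # NormalFloat4, as in the snippet above
    )
    return AutoModelForCausalLM.from_pretrained(
        pretrained_path,
        quantization_config=quant_config,
        torch_dtype=compute_dtype,
    )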
num_params = get_model_params_num(model)
0
2023-11-02 01:37:01+00:00
24k
bytedance/cryostar
projects/star/train_atom.py
[ { "identifier": "SpatialGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class SpatialGridTranslate(torch.nn.Module):\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgrid([\n torch.linspace(-1.0, 1.0, self.D, device=device),\n torch.linspace(-1.0, 1.0, self.D, device=device)],\n indexing=\"ij\"), dim=-1).reshape(-1, 2)\n # yapf: enable\n self.register_buffer(\"coords\", coords)\n\n def transform(self, images: torch.Tensor, trans: torch.Tensor):\n \"\"\"\n The `images` are stored in `YX` mode, so the `trans` is also `YX`!\n\n Supposing that D is 96, a point is at 0.0:\n - adding 48 should move it to the right corner which is 1.0\n 1.0 = 0.0 + 48 / (96 / 2)\n - adding 96(>48) should leave it at 0.0\n 0.0 = 0.0 + 96 / (96 / 2) - 2.0\n - adding -96(<48) should leave it at 0.0\n 0.0 = 0.0 - 96 / (96 / 2) + 2.0\n\n Input:\n images: (B, NY, NX)\n trans: (B, T, 2)\n\n Returns:\n images: (B, T, NY, NX)\n \"\"\"\n B, NY, NX = images.shape\n assert self.D == NY == NX\n assert images.shape[0] == trans.shape[0]\n\n grid = einops.rearrange(self.coords, \"N C2 -> 1 1 N C2\") - \\\n einops.rearrange(trans, \"B T C2 -> B T 1 C2\") * 2 / self.D\n grid = grid.flip(-1) # convert the first axis from slow-axis to fast-axis\n grid[grid >= 1] -= 2\n grid[grid <= -1] += 2\n grid.clamp_(-1.0, 1.0)\n\n sampled = F.grid_sample(einops.rearrange(images, \"B NY NX -> B 1 NY NX\"), grid, align_corners=True)\n\n sampled = einops.rearrange(sampled, \"B 1 T (NY NX) -> B T NY NX\", NX=NX, NY=NY)\n return sampled" }, { "identifier": "StarfileDataSet", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDataSet(Dataset):\n\n def __init__(self, cfg: StarfileDatasetConfig):\n super().__init__()\n self.cfg = cfg\n self.df = starfile.read(Path(cfg.starfile_path))\n\n if \"optics\" in self.df:\n optics_df = self.df[\"optics\"]\n particles_df = self.df[\"particles\"]\n else:\n optics_df = None\n particles_df = self.df\n self.particles_df = particles_df\n\n if cfg.apix is None:\n if optics_df is not None and \"rlnImagePixelSize\" in optics_df:\n self.apix = float(optics_df[\"rlnImagePixelSize\"][0])\n print(f\"Infer dataset apix={self.apix} from first optic group.\")\n elif \"rlnDetectorPixelSize\" in particles_df and \"rlnMagnification\" in particles_df:\n self.apix = float(particles_df[\"rlnDetectorPixelSize\"][0] / particles_df[\"rlnMagnification\"][0] * 1e4)\n print(f\"Infer dataset apix={self.apix} from first particle meta data.\")\n else:\n raise AttributeError(\"Cannot parse apix from starfile, please set it in config by hand.\")\n else:\n self.apix = cfg.apix\n\n if cfg.side_shape is None:\n tmp_mrc_path = osp.join(cfg.dataset_dir, particles_df[\"rlnImageName\"][0].split('@')[-1])\n with mrcfile.mmap(tmp_mrc_path, mode=\"r\", permissive=True) as m:\n self.side_shape = m.data.shape[-1]\n print(f\"Infer dataset side_shape={self.side_shape} from the 1st particle.\")\n else:\n self.side_shape = cfg.side_shape\n\n self.num_proj = len(particles_df)\n\n self.down_side_shape = self.side_shape\n if cfg.down_side_shape is not None:\n self.down_side_shape = cfg.down_side_shape\n\n if cfg.mask_rad is not None:\n self.mask = Mask(self.down_side_shape, cfg.mask_rad)\n\n self.f_mu = None\n self.f_std = None\n\n def __len__(self):\n return self.num_proj\n\n def estimate_normalization(self):\n if self.f_mu is None and self.f_std is None:\n f_sub_data = []\n # I have checked that the standard deviation of 10/100/1000 particles is 
similar\n for i in range(0, len(self), len(self) // 100):\n f_sub_data.append(self[i][\"fproj\"])\n f_sub_data = torch.cat(f_sub_data, dim=0)\n # self.f_mu = torch.mean(f_sub_data)\n self.f_mu = 0.0 # just follow cryodrgn\n self.f_std = torch.std(f_sub_data).item()\n else:\n raise Exception(\"The normalization factor has been estimated!\")\n\n def __getitem__(self, idx):\n item_row = self.particles_df.iloc[idx]\n try:\n img_name_raw = item_row[\"rlnImageName\"]\n in_mrc_idx, img_name = item_row[\"rlnImageName\"].split(\"@\")\n in_mrc_idx = int(in_mrc_idx) - 1\n mrc_path = osp.join(self.cfg.dataset_dir, img_name)\n with mrcfile.mmap(mrc_path, mode=\"r\", permissive=True) as mrc:\n if mrc.data.ndim > 2:\n proj = torch.from_numpy(np.array(mrc.data[in_mrc_idx])).float() * self.cfg.scale_images\n else:\n # the mrcs file can contain only one particle\n proj = torch.from_numpy(np.array(mrc.data)).float() * self.cfg.scale_images\n\n # get (1, side_shape, side_shape) proj\n if len(proj.shape) == 2:\n proj = proj[None, :, :] # add a dummy channel (for consistency w/ img fmt)\n else:\n assert len(proj.shape) == 3 and proj.shape[0] == 1 # some starfile already have a dummy channel\n\n # down-sample\n if self.down_side_shape != self.side_shape:\n if self.cfg.down_method == \"interp\":\n proj = tvf.resize(proj, [self.down_side_shape, ] * 2, antialias=True)\n elif self.cfg.down_method == \"fft\":\n proj = downsample_2d(proj[0, :, :], self.down_side_shape)[None, :, :]\n else:\n raise NotImplementedError\n\n if self.cfg.mask_rad is not None:\n proj = self.mask(proj)\n\n except Exception as e:\n print(f\"WARNING: Particle image {img_name_raw} invalid! Setting to zeros.\")\n print(e)\n proj = torch.zeros(1, self.down_side_shape, self.down_side_shape)\n\n if self.cfg.power_images != 1.0:\n proj *= self.cfg.power_images\n\n # Generate CTF from CTF paramaters\n defocusU = torch.from_numpy(np.array(item_row[\"rlnDefocusU\"] / 1e4, ndmin=2)).float()\n defocusV = torch.from_numpy(np.array(item_row[\"rlnDefocusV\"] / 1e4, ndmin=2)).float()\n angleAstigmatism = torch.from_numpy(np.radians(np.array(item_row[\"rlnDefocusAngle\"], ndmin=2))).float()\n\n # Read \"GT\" orientations\n if self.cfg.ignore_rots:\n rotmat = torch.eye(3).float()\n else:\n # yapf: disable\n rotmat = torch.from_numpy(euler_angles2matrix(\n np.radians(-item_row[\"rlnAngleRot\"]),\n # np.radians(particle[\"rlnAngleTilt\"]) * (-1 if self.cfg.invert_hand else 1),\n np.radians(-item_row[\"rlnAngleTilt\"]),\n np.radians(-item_row[\"rlnAnglePsi\"]))\n ).float()\n # yapf: enable\n\n # Read \"GT\" shifts\n if self.cfg.ignore_trans:\n shiftX = torch.tensor([0.])\n shiftY = torch.tensor([0.])\n else:\n # support early starfile formats\n # Particle translations used to be in pixels (rlnOriginX and rlnOriginY) but this changed to Angstroms\n # (rlnOriginXAngstrom and rlnOriginYAngstrom) in relion 3.1.\n # https://relion.readthedocs.io/en/release-3.1/Reference/Conventions.html\n if \"rlnOriginXAngst\" in item_row:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginXAngst\"], dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginYAngst\"], dtype=np.float32))\n else:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginX\"] * self.apix, dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginY\"] * self.apix, dtype=np.float32))\n\n fproj = primal_to_fourier_2d(proj)\n\n if self.f_mu is not None:\n fproj = (fproj - self.f_mu) / self.f_std\n proj = fourier_to_primal_2d(fproj).real\n\n in_dict = {\n \"proj\": 
proj,\n \"rotmat\": rotmat,\n \"defocusU\": defocusU,\n \"defocusV\": defocusV,\n \"shiftX\": shiftX,\n \"shiftY\": shiftY,\n \"angleAstigmatism\": angleAstigmatism,\n \"idx\": torch.tensor(idx, dtype=torch.long),\n \"fproj\": fproj,\n \"imgname_raw\": img_name_raw\n }\n\n if \"rlnClassNumber\" in item_row:\n in_dict[\"class_id\"] = item_row[\"rlnClassNumber\"]\n\n return in_dict" }, { "identifier": "StarfileDatasetConfig", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDatasetConfig:\n dataset_dir: str\n starfile_path: str\n # if is not specified, the following apix, and side_shape will be inferred from starfile\n apix: float = None\n side_shape: int = None\n # down-sample the original image or not\n down_side_shape: int = None\n down_method: str = \"interp\"\n # apply a circular mask on input image or not\n mask_rad: float = None\n # change image values\n scale_images: float = 1.0\n power_images: float = field(\n default=1.0,\n metadata={\"help\": \"Change the power of the signal by multiplying a constant number.\"})\n # ignore pose from starfile or not\n ignore_trans: bool = False\n ignore_rots: bool = False\n # invert_hand: bool = field(\n # default=False,\n # metadata={\"help\": \"Invert handedness when reading relion data.\"})" }, { "identifier": "Mask", "path": "cryostar/utils/dataio.py", "snippet": "class Mask(torch.nn.Module):\n\n def __init__(self, im_size, rad):\n super(Mask, self).__init__()\n\n mask = torch.lt(torch.linspace(-1, 1, im_size)[None]**2 + torch.linspace(-1, 1, im_size)[:, None]**2, rad**2)\n # float for pl ddp broadcast compatible\n self.register_buffer('mask', mask.float())\n self.num_masked = torch.sum(mask).item()\n\n def forward(self, x):\n return x * self.mask" }, { "identifier": "CTFRelion", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFRelion(CTFBase):\n \"\"\"\n BUG: There are two bugs in this file:\n 1. `self.angleFrequency` has some error for even-sized grid.\n 2. `local_defocus` in `get_ctf()` has some error, `angleAstigmatism` should be\n replaced with `defocusU - defocusV`.\n\n The bugs will not affect real-world data too much. But you may encounter some issues\n on simulated datasets. Use CTFCryoDRGN instead.\n \"\"\"\n\n def __init__(self,\n size=257,\n resolution=0.8,\n kV=300.0,\n valueNyquist=1.,\n defocusU=1.,\n defocusV=1.,\n angleAstigmatism=0.,\n cs=2.7,\n phasePlate=0.,\n amplitudeContrast=.1,\n bFactor=0.,\n num_particles=500,\n requires_grad=False,\n precompute=False,\n flip_images=False):\n super(CTFRelion, self).__init__(resolution, num_particles, requires_grad)\n self.requires_grad = requires_grad\n self.flip_images = flip_images\n\n self.size = size # in pixel\n self.resolution = resolution # in angstrom\n self.kV = kV # in kilovolt\n\n self.valueNyquist = valueNyquist\n self.phasePlate = phasePlate / 180. * np.pi # in radians (converted from degrees)\n self.amplitudeContrast = amplitudeContrast\n self.bFactor = bFactor\n\n self.frequency = 1. / self.resolution\n\n self.wavelength = self._get_ewavelength(self.kV * 1e3) # input in V (so we convert kv*1e3)\n\n angleAstigmatism = angleAstigmatism / 180. 
* np.pi # input in degree converted in radian\n cs = cs * 1e7 # input in mm converted in angstrom\n # the angleAstigmatism, defocusU, defocusV and cs are nn.Parameter of size (N, 1, 1)\n self.angleAstigmatism = nn.Parameter(angleAstigmatism * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.cs = nn.Parameter(cs * torch.ones((num_particles, 1, 1), dtype=torch.float32), requires_grad=requires_grad)\n self.defocusU = nn.Parameter(defocusU * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.defocusV = nn.Parameter(defocusV * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n\n self.precomputed_filters = precompute\n\n ax = torch.linspace(-1. / (2. * resolution), 1 / (2. * resolution), self.size)\n mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n self.register_buffer(\"r2\", mx**2 + my**2)\n self.register_buffer(\"r\", torch.sqrt(self.r2))\n self.register_buffer(\"angleFrequency\", torch.atan2(my, mx))\n\n if not self.requires_grad and self.precomputed_filters:\n print(\"Precomputing hFourier in CTF\")\n self.register_buffer('hFourier', self.get_ctf(torch.arange(num_particles), num_particles))\n\n def _get_ewavelength(self, U):\n # assumes V as input, returns wavelength in angstrom\n h = scipy.constants.h\n e = scipy.constants.e\n c = scipy.constants.c\n m0 = scipy.constants.m_e\n\n return h / math.sqrt(2. * m0 * e * U) / math.sqrt(1 + e * U / (2 * m0 * c**2)) * 1e10\n\n def get_ctf(self, idcs, B, cpu_params={}, frequency_marcher=None):\n defocusU = self.defocusU[idcs, :, :]\n defocusV = self.defocusV[idcs, :, :]\n angleAstigmatism = self.angleAstigmatism[idcs, :, :]\n cs = self.cs[idcs, :, :]\n\n ac = self.amplitudeContrast\n pc = math.sqrt(1. - ac**2)\n K1 = np.pi / 2. * cs * self.wavelength**3\n K2 = np.pi * self.wavelength\n\n # Cut-off from frequency marcher\n if frequency_marcher is not None:\n self.size_after_fm = 2 * frequency_marcher.f + 1\n if self.size_after_fm > self.size:\n self.size_after_fm = self.size\n angleFrequency = frequency_marcher.cut_coords_plane(self.angleFrequency.reshape(\n self.size, self.size, 1)).reshape(self.size_after_fm, self.size_after_fm)\n r2 = frequency_marcher.cut_coords_plane(self.r2.reshape(self.size, self.size,\n 1)).reshape(self.size_after_fm, self.size_after_fm)\n else:\n self.size_after_fm = self.size\n angleFrequency = self.angleFrequency\n r2 = self.r2\n\n angle = angleFrequency - angleAstigmatism\n local_defocus = 1e4 * (defocusU + defocusV) / 2. + angleAstigmatism * torch.cos(2. * angle)\n\n gamma = K1 * r2**2 - K2 * r2 * local_defocus - self.phasePlate\n hFourier = -pc * torch.sin(gamma) + ac * torch.cos(gamma)\n\n if self.valueNyquist != 1:\n decay = np.sqrt(-np.log(self.valueNyquist)) * 2. 
* self.resolution\n envelope = torch.exp(-self.frequency * decay**2 * r2)\n hFourier *= envelope\n\n return hFourier\n\n def oversample_multiply_crop(self, x_fourier, hFourier):\n # we assume that the shape of the CTF is always going to be bigger\n # than the size of the input image\n input_sz = x_fourier.shape[-1]\n if input_sz != self.size_after_fm:\n x_primal = fourier_to_primal_2d(x_fourier)\n\n pad_len = (self.size_after_fm - x_fourier.shape[-1]) // 2 # here we assume even lengths\n p2d = (pad_len, pad_len, pad_len, pad_len)\n x_primal_padded = F.pad(x_primal, p2d, 'constant', 0)\n\n x_fourier_padded = primal_to_fourier_2d(x_primal_padded)\n\n x_fourier_padded_filtered = x_fourier_padded * hFourier[:, None, :, :]\n return x_fourier_padded_filtered[..., pad_len:-pad_len, pad_len:-pad_len]\n else:\n return x_fourier * hFourier[:, None, :, :]\n\n def get_cpu_params(self, idcs, ctf_params, flip=False):\n batch_size = idcs.shape[0]\n self.defocusU[idcs, :, :] = ctf_params['defocusU'][:batch_size] if not flip else\\\n ctf_params['defocusU'][batch_size:]\n self.defocusV[idcs, :, :] = ctf_params['defocusV'][:batch_size] if not flip else\\\n ctf_params['defocusV'][batch_size:]\n self.angleAstigmatism[idcs, :, :] = ctf_params['angleAstigmatism'][:batch_size] if not flip else\\\n ctf_params['angleAstigmatism'][batch_size:]\n cpu_params = {}\n return cpu_params\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n # This is when we want to prescribe parameters for the CTF\n if x_fourier.dim() == 3:\n x_fourier = x_fourier[None, ...]\n # x_fourier: B, 1, S, S\n batch_size = len(idcs)\n cpu_params = {}\n if ctf_params:\n cpu_params = self.get_cpu_params(idcs, ctf_params, flip=False)\n\n # if new params for the CTF have been prescribed or we are optimizing it\n # then request the evaluation of the CTF\n if not ctf_params and self.precomputed_filters and not self.requires_grad:\n hFourier = self.hFourier[idcs, :, :]\n else:\n hFourier = self.get_ctf(idcs, batch_size, cpu_params=cpu_params, frequency_marcher=frequency_marcher)\n\n if self.flip_images:\n flipped_hFourier = torch.flip(hFourier, [1, 2])\n\n hFourier = torch.cat([hFourier, flipped_hFourier], dim=0)\n\n return self.oversample_multiply_crop(x_fourier, hFourier)" }, { "identifier": "CTFCryoDRGN", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFCryoDRGN(CTFBase):\n\n def __init__(self,\n size,\n resolution,\n num_particles=None,\n kV=300,\n cs=2.0,\n amplitudeContrast=0.1,\n requires_grad=False):\n super(CTFBase, self).__init__()\n self.size = size\n self.resolution = resolution\n self.requires_grad = requires_grad\n self.kV = kV\n self.cs = cs\n self.ac = amplitudeContrast\n # ax = torch.linspace(-1. / (2. * resolution), 1 / (2. 
* resolution), self.size)\n # mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n ax = torch.fft.fftshift(torch.fft.fftfreq(self.size, self.resolution))\n mx, my = torch.meshgrid(ax, ax, indexing=\"xy\")\n freqs = torch.stack([mx.flatten(), my.flatten()], 1)\n self.register_buffer(\"freqs\", freqs)\n\n def get_ctf(self, ctf_params={}):\n bsz = len(ctf_params[\"defocusU\"])\n device = self.freqs.device\n hFourier = compute_ctf(freqs=self.freqs.repeat(bsz, 1, 1),\n dfu=(ctf_params[\"defocusU\"] * 1e4).squeeze(1),\n dfv=(ctf_params[\"defocusV\"] * 1e4).squeeze(1),\n dfang=torch.rad2deg(ctf_params[\"angleAstigmatism\"]).squeeze(1),\n volt=torch.tensor(self.kV, device=device).repeat(bsz, 1),\n cs=torch.tensor(self.cs, device=device).repeat(bsz, 1),\n w=torch.tensor(self.ac, device=device).repeat(bsz,\n 1)).reshape(bsz, self.size, self.size)\n return hFourier\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n hFourier = -self.get_ctf(ctf_params)\n return x_fourier * hFourier[:, None, :, :]" }, { "identifier": "calc_cor_loss", "path": "cryostar/utils/losses.py", "snippet": "def calc_cor_loss(pred_images, gt_images, mask=None):\n if mask is not None:\n pred_images = mask(pred_images)\n gt_images = mask(gt_images)\n pixel_num = mask.num_masked\n else:\n pixel_num = pred_images.shape[-2] * pred_images.shape[-1]\n\n # b, c, h, w -> b, c, num_pix\n pred_images = pred_images.flatten(start_dim=2)\n gt_images = gt_images.flatten(start_dim=2)\n\n # b, c\n dots = (pred_images * gt_images).sum(-1)\n # b, c -> b, c\n err = -dots / (gt_images.std(-1) + 1e-5) / (pred_images.std(-1) + 1e-5)\n # b, c -> b -> 1 value\n err = err.sum(-1).mean() / pixel_num\n return err" }, { "identifier": "calc_kl_loss", "path": "cryostar/utils/losses.py", "snippet": "def calc_kl_loss(mu, log_var, free_bits, reduction=\"mean\"):\n kld_loss = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp())\n # free bits\n kld_loss = torch.clamp(kld_loss, free_bits) # (bsz, z-dim)\n kld_loss = torch.mean(kld_loss, dim=1) # (bsz, )\n if reduction == \"mean\":\n kld_loss = torch.mean(kld_loss) # averaged over bsz x z-dim\n elif reduction == \"none\":\n kld_loss = kld_loss\n else:\n raise NotImplementedError\n return kld_loss" }, { "identifier": "log_to_current", "path": "cryostar/utils/misc.py", "snippet": "def set_seed(seed: int = 42):\ndef chain(arg, *funcs):\ndef convert_to_numpy(*args):\ndef CHECK_SHAPE(tensor, expected_shape):\ndef ASSERT_SHAPE(tensor, expected_shape):\ndef parse_mmengine_args(override_mode=\"default\"):\ndef flatten_nested_dict(nested: Union[dict, Config]) -> dict:\ndef warmup(warmup_step, lower=0.0, upper=1.0):\n def run(cur_step):\ndef init_mmengine_config(args):\ndef init_mmengine_exp(args,\n exp_prefix='',\n backup_list=None,\n inplace=True,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\",\n tensorboard=False):\ndef _get_next_version(root_dir, dir_name_prefix):\ndef pl_init_exp(override_mode=\"default\",\n exp_prefix='',\n backup_list=None,\n inplace=False,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\"):\ndef save_pdb(CAs, path, ref_pdb_path):\ndef load_CAs_from_pdb(file):\ndef load_NCaC_from_pdb(file):\ndef load_chain_A(pdb_path):\ndef points_to_pdb(path_to_save, points: np.ndarray):\ndef point_stack_to_pdb(path_to_save, point_stack: np.ndarray):\ndef find_rigid_alignment(A, B):\ndef batch_find_rigid_alignment(A, B):\ndef pretty_dict(x, precision=3):\ndef create_sphere_mask(d, h, w, center=None, radius=None) -> np.ndarray:\ndef create_circular_mask(h, w, 
center=None, radius=None) -> np.ndarray:\n H = A_c.T.mm(B_c)\n U, S, V = torch.svd(H)\n R = V.mm(U.T)\n H = einops.einsum(A_c, B_c, \"b n c1, b n c2 -> b c1 c2\")\n V = VmT.mT\n R = einops.einsum(V, U.transpose(2, 1), \"b c1 c2, b c2 c3 -> b c1 c3\")" }, { "identifier": "bt_save_pdb", "path": "cryostar/utils/pdb_tools.py", "snippet": "def bt_save_pdb(file_path: Union[str, Path], array: Union[AtomArray, AtomArrayStack], **kwargs):\n \"\"\"Save biotite AtomArray or AtomArrayStack to pdb file\n\n Parameters\n ----------\n file_path: save file path\n array: the structure to be saved\n kwargs: additional parameters to be passed, always empty\n\n \"\"\"\n bt_struc.io.save_structure(file_path, array, **kwargs)" }, { "identifier": "EMAN2Grid", "path": "cryostar/gmm/gmm.py", "snippet": "class EMAN2Grid(BaseGrid):\n \"\"\"EMAN2 style grid.\n origin set to -(side_shape // 2) * voxel_size\n\n \"\"\"\n\n def __init__(self, side_shape, voxel_size):\n origin = -side_shape // 2 * voxel_size\n super().__init__(side_shape=side_shape, voxel_size=voxel_size, origin=origin)" }, { "identifier": "batch_projection", "path": "cryostar/gmm/gmm.py", "snippet": "def batch_projection(gauss: Gaussian, rot_mats: torch.Tensor, line_grid: Grid) -> torch.Tensor:\n \"\"\"A quick version of e2gmm projection.\n\n Parameters\n ----------\n gauss: (b/1, num_centers, 3) mus, (b/1, num_centers) sigmas and amplitudes\n rot_mats: (b, 3, 3)\n line_grid: (num_pixels, 3) coords, (nx, ) shape\n\n Returns\n -------\n proj: (b, y, x) projections\n \"\"\"\n\n centers = einops.einsum(rot_mats, gauss.mus, \"b c31 c32, b nc c32 -> b nc c31\")\n\n sigmas = einops.rearrange(gauss.sigmas, 'b nc -> b 1 nc')\n sigmas = 2 * sigmas**2\n\n proj_x = einops.rearrange(line_grid.coords, \"nx -> 1 nx 1\") - einops.rearrange(centers[..., 0], \"b nc -> b 1 nc\")\n proj_x = torch.exp(-proj_x**2 / sigmas)\n\n proj_y = einops.rearrange(line_grid.coords, \"ny -> 1 ny 1\") - einops.rearrange(centers[..., 1], \"b nc -> b 1 nc\")\n proj_y = torch.exp(-proj_y**2 / sigmas)\n\n proj = einops.einsum(gauss.amplitudes, proj_x, proj_y, \"b nc, b nx nc, b ny nc -> b nx ny\")\n proj = einops.rearrange(proj, \"b nx ny -> b ny nx\")\n return proj" }, { "identifier": "Gaussian", "path": "cryostar/gmm/gmm.py", "snippet": "class Gaussian:\n mus: Union[torch.Tensor, np.ndarray]\n sigmas: Union[torch.Tensor, np.ndarray]\n amplitudes: Union[torch.Tensor, np.ndarray]" }, { "identifier": "E3Deformer", "path": "cryostar/gmm/deformer.py", "snippet": "class E3Deformer(torch.nn.Module, DeformerProtocol):\n\n def transform(self, deformation, coords):\n ASSERT_SHAPE(coords, (None, 3))\n ASSERT_SHAPE(deformation, (None, coords.shape[0] * 3))\n\n bsz = deformation.shape[0]\n shift = deformation.reshape(bsz, -1, 3)\n return shift + coords" }, { "identifier": "NMADeformer", "path": "cryostar/gmm/deformer.py", "snippet": "class NMADeformer(torch.nn.Module, DeformerProtocol):\n def __init__(self, modes: torch.FloatTensor) -> None:\n super().__init__()\n modes = einops.rearrange(\n modes, \"(num_coords c3) num_modes -> num_modes num_coords c3\", c3=3\n )\n self.register_buffer(\"modes\", modes)\n self.num_modes = modes.shape[0]\n self.num_coords = modes.shape[1]\n\n def transform(self, deformation, coords):\n ASSERT_SHAPE(coords, (self.num_coords, 3))\n ASSERT_SHAPE(deformation, (None, 6 + self.num_modes))\n\n axis_angle = deformation[..., :3]\n translation = deformation[..., 3:6] * 10\n nma_coeff = deformation[..., 6:]\n rotation_matrix = axis_angle_to_matrix(axis_angle)\n\n nma_deform_e3 = 
einops.einsum(\n nma_coeff, self.modes, \"bsz num_modes, num_modes num_coords c3 -> bsz num_coords c3\"\n )\n rotated_coords = einops.einsum(rotation_matrix, nma_deform_e3 + coords,\n \"bsz c31 c32, bsz num_coords c31 -> bsz num_coords c32\")\n deformed_coords = rotated_coords + einops.rearrange(translation, \"bsz c3 -> bsz 1 c3\")\n return deformed_coords" }, { "identifier": "primal_to_fourier_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "@torch.autocast(\"cuda\")\ndef primal_to_fourier_2d(r: torch.Tensor) -> torch.Tensor:\n with torch.autocast(\"cuda\", enabled=False):\n r = torch.fft.ifftshift(r.float(), dim=(-2, -1))\n f = torch.fft.fftshift(torch.fft.fftn(r, s=(r.shape[-2], r.shape[-1]), dim=(-2, -1)), dim=(-2, -1))\n return f" }, { "identifier": "fourier_to_primal_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "def fourier_to_primal_2d(f: torch.Tensor) -> torch.Tensor:\n f = torch.fft.ifftshift(f, dim=(-2, -1))\n return torch.fft.fftshift(torch.fft.ifftn(f, s=(f.shape[-2], f.shape[-1]), dim=(-2, -1)), dim=(-2, -1))" }, { "identifier": "Polymer", "path": "cryostar/utils/polymer.py", "snippet": "class Polymer:\n chain_id: np.ndarray\n res_id: np.ndarray\n res_name: np.ndarray\n coord: np.ndarray\n atom_name: np.ndarray\n element: np.ndarray\n num_electron: np.ndarray\n\n def __init__(self, num):\n self.chain_id = np.empty(num, dtype=\"U4\")\n self.res_id = np.zeros(num, dtype=int)\n self.res_name = np.empty(num, dtype=\"U3\")\n self.coord = np.zeros((num, 3), dtype=np.float32)\n self.atom_name = np.empty(num, dtype=\"U6\")\n self.element = np.empty(num, dtype=\"U2\")\n self.num_electron = np.zeros(num, dtype=int)\n\n def __setitem__(self, index, kwargs):\n assert set(kwargs.keys()).issubset(f.name for f in dataclasses.fields(self))\n for k, v in kwargs.items():\n getattr(self, k)[index] = v\n\n def __getitem__(self, index):\n return {f.name: getattr(self, f.name)[index] for f in dataclasses.fields(self)}\n\n def __len__(self):\n return len(self.chain_id)\n\n @property\n def num_amino_acids(self):\n return np.sum(np.isin(self.atom_name, AA_ATOMS))\n\n @property\n def num_nucleotides(self):\n return np.sum(np.isin(self.atom_name, NT_ATOMS))\n\n @property\n def num_chains(self):\n return len(np.unique(self.chain_id))\n\n @classmethod\n def from_atom_arr(cls, atom_arr):\n assert isinstance(atom_arr, struc.AtomArray)\n\n nt_arr = atom_arr[struc.filter_nucleotides(atom_arr)]\n aa_arr = atom_arr[struc.filter_amino_acids(atom_arr)]\n\n num = 0\n if len(aa_arr) > 0:\n num += struc.get_residue_count(aa_arr)\n if len(nt_arr) > 0:\n for res in struc.residue_iter(nt_arr):\n valid_atoms = set(res.atom_name).intersection(NT_ATOMS)\n if len(valid_atoms) <= 0:\n raise UserWarning(f\"Nucleotides doesn't contain {' or '.join(NT_ATOMS)}.\")\n else:\n num += len(valid_atoms)\n meta = cls(num)\n\n def _update_res(tmp_res, kind=\"aa\"):\n nonlocal pos\n\n if kind == \"aa\":\n using_atom_names = AA_ATOMS\n filtered_res = tmp_res[struc.filter_peptide_backbone(tmp_res)]\n elif kind == \"nt\":\n using_atom_names = NT_ATOMS\n filtered_res = tmp_res\n else:\n raise NotImplemented\n\n valid_atom_names = set(tmp_res.atom_name).intersection(using_atom_names)\n\n for select_atom_name in valid_atom_names:\n meta[pos] = {\n \"chain_id\": tmp_res.chain_id[0],\n \"res_id\": tmp_res.res_id[0],\n \"res_name\": tmp_res.res_name[0],\n \"coord\": filtered_res[filtered_res.atom_name == select_atom_name].coord,\n \"atom_name\": select_atom_name,\n \"element\": filtered_res[filtered_res.atom_name == 
select_atom_name].element[0],\n \"num_electron\": get_num_electrons(tmp_res) // len(valid_atom_names)\n }\n pos += 1\n\n def _update(tmp_arr, kind=\"aa\"):\n nonlocal pos\n for chain in struc.chain_iter(tmp_arr):\n for tmp_res in struc.residue_iter(chain):\n _update_res(tmp_res, kind)\n\n pos = 0\n\n if len(aa_arr) > 0:\n _update(aa_arr, kind=\"aa\")\n if len(nt_arr) > 0:\n _update(nt_arr, kind=\"nt\")\n\n assert pos == num\n return meta\n\n @classmethod\n def from_pdb(cls, file_path):\n atom_arr = bt_read_pdb(file_path)\n if atom_arr.stack_depth() > 1:\n print(\"PDB file contains more than 1 models, select the 1st model\")\n atom_arr = atom_arr[0]\n return Polymer.from_atom_arr(atom_arr)\n\n def to_atom_arr(self):\n num = len(self)\n atom_arr = struc.AtomArray(num)\n atom_arr.coord = self.coord\n\n for f in dataclasses.fields(self):\n if f.name != \"coord\" and f.name in atom_arr.get_annotation_categories():\n atom_arr.set_annotation(f.name, getattr(self, f.name))\n # atom_arr.atom_name[atom_arr.atom_name == \"R\"] = \"CB\"\n return atom_arr" }, { "identifier": "NT_ATOMS", "path": "cryostar/utils/polymer.py", "snippet": "NT_ATOMS = (\"C1'\", )" }, { "identifier": "AA_ATOMS", "path": "cryostar/utils/polymer.py", "snippet": "AA_ATOMS = (\"CA\", )" }, { "identifier": "find_quaint_cutoff_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def find_quaint_cutoff_pairs(coord_arr,\n chain_id_arr,\n res_id_arr,\n intra_chain_cutoff=12.,\n inter_chain_cutoff=12.,\n intra_chain_res_bound=None):\n sel_indices = []\n dist_map = distance.cdist(coord_arr, coord_arr, metric='euclidean')\n # 1. intra chain\n sel_mask = dist_map <= intra_chain_cutoff\n sel_mask = np.triu(sel_mask, k=1)\n # get indices of valid pairs\n indices_in_pdb = np.nonzero(sel_mask)\n indices_in_pdb = np.column_stack((indices_in_pdb[0], indices_in_pdb[1]))\n indices_in_pdb = indices_in_pdb[chain_id_arr[indices_in_pdb[:, 0]] == chain_id_arr[indices_in_pdb[:, 1]]]\n # filter by res_id\n if intra_chain_res_bound is not None:\n assert res_id_arr is not None\n res_ids = res_id_arr[indices_in_pdb]\n res_id_dist = np.abs(np.diff(res_ids, axis=1)).flatten()\n indices_in_pdb = indices_in_pdb[res_id_dist <= intra_chain_res_bound]\n\n sel_indices.append(indices_in_pdb)\n\n # 2. 
inter chain\n if inter_chain_cutoff is not None:\n sel_mask = dist_map <= inter_chain_cutoff\n sel_mask = np.triu(sel_mask, k=1)\n indices_in_pdb = np.nonzero(sel_mask)\n indices_in_pdb = np.column_stack((indices_in_pdb[0], indices_in_pdb[1]))\n indices_in_pdb = indices_in_pdb[chain_id_arr[indices_in_pdb[:, 0]] != chain_id_arr[indices_in_pdb[:, 1]]]\n sel_indices.append(indices_in_pdb)\n\n sel_indices = np.vstack(sel_indices)\n return sel_indices" }, { "identifier": "find_range_cutoff_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def find_range_cutoff_pairs(coord_arr, min_cutoff=4., max_cutoff=10.):\n dist_map = distance.cdist(coord_arr, coord_arr, metric='euclidean')\n sel_mask = (dist_map <= max_cutoff) & (dist_map >= min_cutoff)\n indices_in_pdb = np.nonzero(sel_mask)\n indices_in_pdb = np.column_stack((indices_in_pdb[0], indices_in_pdb[1]))\n return indices_in_pdb" }, { "identifier": "find_continuous_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def find_continuous_pairs(chain_id_arr, res_id_arr, atom_name_arr):\n pairs = []\n\n # res_id in different chains are duplicated, so loop on chains\n u_chain_id = np.unique(chain_id_arr)\n\n for c_id in u_chain_id:\n tmp_mask = chain_id_arr == c_id\n tmp_indices_in_pdb = np.nonzero(tmp_mask)[0]\n\n tmp_res_id_arr = res_id_arr[tmp_mask]\n tmp_atom_name_arr = atom_name_arr[tmp_mask]\n\n # check is aa or nt\n tmp_atom_name_set = set(tmp_atom_name_arr)\n\n if len(tmp_atom_name_set.intersection(AA_ATOMS)) > len(tmp_atom_name_set.intersection(NT_ATOMS)):\n in_res_atom_names = AA_ATOMS\n elif len(tmp_atom_name_set.intersection(AA_ATOMS)) < len(tmp_atom_name_set.intersection(NT_ATOMS)):\n in_res_atom_names = NT_ATOMS\n else:\n raise NotImplemented(\"Cannot determine chain is amino acid or nucleotide.\")\n\n # find pairs\n if len(in_res_atom_names) == 1:\n u_res_id, indices_in_chain = np.unique(tmp_res_id_arr, return_index=True)\n if len(u_res_id) != np.sum(tmp_mask):\n raise ValueError(f\"Found duplicate residue id in single chain {c_id}.\")\n\n indices_in_chain_pair = np.column_stack((indices_in_chain[:-1], indices_in_chain[1:]))\n\n # must be adjacent on residue id\n valid_mask = np.abs(np.diff(u_res_id[indices_in_chain_pair], axis=1)) == 1\n\n indices_in_chain_pair = indices_in_chain_pair[valid_mask.flatten()]\n\n indices_in_pdb_pair = tmp_indices_in_pdb[indices_in_chain_pair]\n elif len(in_res_atom_names) > 1:\n\n def _cmp(a, b):\n # res_id compare\n if a[0] != b[0]:\n return a[0] - b[0]\n else:\n # atom_name in the same order of AA_ATOMS or NT_ATOMS\n return in_res_atom_names.index(a[1]) - in_res_atom_names.index(b[1])\n\n cache = list(zip(tmp_res_id_arr, tmp_atom_name_arr, tmp_indices_in_pdb))\n sorted_cache = list(sorted(cache, key=cmp_to_key(_cmp)))\n\n sorted_indices_in_pdb = [item[2] for item in sorted_cache]\n sorted_res_id = [item[0] for item in sorted_cache]\n\n indices_in_pdb_pair = np.column_stack((sorted_indices_in_pdb[:-1], sorted_indices_in_pdb[1:]))\n\n valid_mask = np.abs(np.diff(np.column_stack((sorted_res_id[:-1], sorted_res_id[1:])), axis=1)) <= 1\n\n indices_in_pdb_pair = indices_in_pdb_pair[valid_mask.flatten()]\n else:\n raise NotImplemented(\"No enough atoms to construct continuous pairs.\")\n\n pairs.append(indices_in_pdb_pair)\n\n pairs = np.vstack(pairs)\n return pairs" }, { "identifier": "calc_dist_by_pair_indices", "path": "cryostar/utils/dist_loss.py", "snippet": "def calc_dist_by_pair_indices(coord_arr, pair_indices):\n coord_pair_arr = coord_arr[pair_indices] # num_pair, 2, 3\n dist = 
np.linalg.norm(np.diff(coord_pair_arr, axis=1), ord=2, axis=-1)\n return dist.flatten()" }, { "identifier": "remove_duplicate_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def remove_duplicate_pairs(pairs_a, pairs_b, remove_flip=True):\n \"\"\"Remove pair b from a\"\"\"\n s = max(pairs_a.max(), pairs_b.max()) + 1\n # trick for fast comparison\n mask = np.zeros((s, s), dtype=bool)\n\n np.put(mask, np.ravel_multi_index(pairs_a.T, mask.shape), True)\n np.put(mask, np.ravel_multi_index(pairs_b.T, mask.shape), False)\n if remove_flip:\n np.put(mask, np.ravel_multi_index(np.flip(pairs_b, 1).T, mask.shape), False)\n return np.column_stack(np.nonzero(mask))" }, { "identifier": "filter_same_chain_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def filter_same_chain_pairs(pair_ids, chain_id_arr):\n chain_ids = chain_id_arr[pair_ids]\n\n same_chain_mask = chain_ids[:, 0] == chain_ids[:, 1]\n\n pair_mask = []\n\n for u in np.unique(chain_ids):\n tmp = np.logical_and(chain_ids[:, 0] == u, same_chain_mask)\n if np.any(tmp):\n pair_mask.append(tmp)\n\n if len(pair_mask) > 0:\n return np.row_stack(pair_mask)\n else:\n return None" }, { "identifier": "DistLoss", "path": "cryostar/utils/dist_loss.py", "snippet": "class DistLoss(nn.Module):\n\n def __init__(self, pair_ids, gt_dists, reduction=\"mean\"):\n super().__init__()\n self.reduction = reduction\n\n self.register_buffer(\"pair_ids\", torch.from_numpy(pair_ids).long())\n self.register_buffer(\"gt_dists\", torch.from_numpy(gt_dists).float())\n\n # edge-wise weights\n # raw_weights = torch.ones(len(pair_ids), dtype=torch.float) * 3.\n #\n # self.register_parameter(\"raw_weights\", nn.Parameter(raw_weights))\n\n # RBF residue-wise weights\n # u_left_ids = np.unique(pair_ids[:, 0])\n #\n # std_idx = np.zeros(max(u_left_ids) + 1, dtype=int)\n # sparse_idx = np.arange(len(u_left_ids))\n #\n # std_idx[u_left_ids] = sparse_idx\n #\n # select_index = std_idx[pair_ids[:, 0]]\n\n # weight = 0.9 at dist_rescale\n # sigmas = torch.ones(max(u_left_ids) + 1, dtype=torch.float) * np.sqrt(-0.5 / np.log(0.9))\n #\n # self.dist_rescale = dist_rescale\n # self.register_buffer(\"select_index\", torch.from_numpy(select_index).long())\n # self.register_parameter(\"sigmas\", nn.Parameter(sigmas))\n\n # def get_weights(self):\n # return torch.sigmoid(self.raw_weights)\n # edge_sigmas = torch.index_select(self.sigmas, dim=0, index=self.select_index)\n # weights = torch.exp(-torch.pow(self.gt_dists / self.dist_rescale, 2) / (2 * torch.pow(edge_sigmas, 2)))\n # return weights\n\n def calc_pair_dists(self, batch_struc):\n batch_dist = batch_struc[:, self.pair_ids] # bsz, num_pair, 2, 3\n batch_dist = LA.vector_norm(torch.diff(batch_dist, dim=-2), axis=-1).squeeze(-1) # bsz, num_pair\n return batch_dist\n\n def forward(self, batch_struc):\n batch_dist = self.calc_pair_dists(batch_struc)\n # mse = torch.pow(batch_dist - self.gt_dists.unsqueeze(0), 2) * self.get_weights().unsqueeze(0)\n mse = torch.pow(batch_dist - self.gt_dists.unsqueeze(0), 2)\n if self.reduction is None:\n return mse\n elif self.reduction == \"mean\":\n return torch.mean(mse)\n else:\n raise NotImplementedError" }, { "identifier": "get_nearest_point", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def get_nearest_point(data: np.ndarray, query: np.ndarray) -> Tuple[npt.NDArray[np.float32], np.ndarray]:\n \"\"\"\n Find closest point in @data to @query\n Return datapoint, index\n \"\"\"\n ind = cdist(query, data).argmin(axis=1)\n return data[ind], ind" }, { "identifier": 
"cluster_kmeans", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def cluster_kmeans(z: np.ndarray, K: int, on_data: bool = True, reorder: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Cluster z by K means clustering\n Returns cluster labels, cluster centers\n If reorder=True, reorders clusters according to agglomerative clustering of cluster centers\n \"\"\"\n kmeans = KMeans(n_clusters=K, n_init=10, random_state=0, max_iter=10)\n labels = kmeans.fit_predict(z)\n centers = kmeans.cluster_centers_\n\n centers_ind = None\n if on_data:\n centers, centers_ind = get_nearest_point(z, centers)\n\n if reorder:\n # BUG from seaborn or scipy:\n # sns.clustermap only supports data with at least 2 dim\n if z.shape[1] == 1:\n centers = np.hstack([centers, np.zeros_like(centers)])\n g = sns.clustermap(centers)\n reordered = g.dendrogram_row.reordered_ind\n centers = centers[reordered]\n if centers_ind is not None:\n centers_ind = centers_ind[reordered]\n tmp = {k: i for i, k in enumerate(reordered)}\n labels = np.array([tmp[k] for k in labels])\n if z.shape[1] == 1:\n centers = centers[:, :1]\n return labels, centers" }, { "identifier": "run_pca", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def run_pca(z: np.ndarray) -> Tuple[np.ndarray, PCA]:\n pca = PCA(z.shape[1])\n pca.fit(z)\n # print(\"Explained variance ratio:\")\n # print(pca.explained_variance_ratio_)\n pc = pca.transform(z)\n return pc, pca" }, { "identifier": "get_pc_traj", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def get_pc_traj(\n pca: PCA,\n zdim: int,\n numpoints: int,\n dim: int,\n start: Optional[float] = 5,\n end: Optional[float] = 95,\n percentiles: Optional[np.ndarray] = None,\n) -> npt.NDArray[np.float32]:\n \"\"\"\n Create trajectory along specified principal component\n\n Inputs:\n pca: sklearn PCA object from run_pca\n zdim (int)\n numpoints (int): number of points between @start and @end\n dim (int): PC dimension for the trajectory (1-based index)\n start (float): Value of PC{dim} to start trajectory\n end (float): Value of PC{dim} to stop trajectory\n percentiles (np.array or None): Define percentile array instead of np.linspace(start,stop,numpoints)\n\n Returns:\n np.array (numpoints x zdim) of z values along PC\n \"\"\"\n if percentiles is not None:\n assert len(percentiles) == numpoints\n traj_pca = np.zeros((numpoints, zdim))\n if percentiles is not None:\n traj_pca[:, dim - 1] = percentiles\n else:\n assert start is not None\n assert end is not None\n traj_pca[:, dim - 1] = np.linspace(start, end, numpoints)\n ztraj_pca = pca.inverse_transform(traj_pca)\n return ztraj_pca" }, { "identifier": "run_umap", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def run_umap(z: np.ndarray, **kwargs) -> Tuple[np.ndarray, umap.UMAP]:\n reducer = umap.UMAP(**kwargs)\n z_embedded = reducer.fit_transform(z)\n return z_embedded, reducer" }, { "identifier": "plot_z_dist", "path": "cryostar/utils/vis_utils.py", "snippet": "def plot_z_dist(z, extra_cluster=None, save_path=None):\n if z.shape[-1] == 1:\n fig = sns.displot(x=z[:, 0])\n fig.set_xlabels(\"z values\")\n if save_path is not None:\n fig.savefig(save_path)\n elif z.shape[-1] == 2:\n sns.set()\n fig = sns.jointplot(x=z[:, 0], y=z[:, 1], kind=\"kde\", fill=True)\n ax = fig.figure.axes\n if extra_cluster is not None:\n ax[0].scatter(extra_cluster[:, 0], extra_cluster[:, 1], marker='.', color='tab:orange')\n if save_path is not None:\n fig.savefig(save_path)\n else:\n raise ValueError(f\"input z with shape 
{z.shape}\")" }, { "identifier": "save_tensor_image", "path": "cryostar/utils/vis_utils.py", "snippet": "def save_tensor_image(tensors, save_path, mask=None):\n # normalize\n max_val = torch.max(tensors.flatten(start_dim=1), 1)[0][:, None, None, None]\n min_val = torch.min(tensors.flatten(start_dim=1), 1)[0][:, None, None, None]\n tensors = (tensors - min_val) / (max_val - min_val)\n\n show_img = ToPILImage()(make_grid(tensors, nrow=5))\n if mask is None:\n show_img.save(save_path)\n else:\n show_img = np.copy(np.asarray(show_img))\n # show_img = cv2.cvtColor(show_img, cv2.COLOR_GRAY2RGB)\n if mask.ndim == 2:\n mask = mask[None]\n mask = ToPILImage()(make_grid(mask.expand(tensors.shape[0], -1, -1, -1), nrow=5))\n mask = np.invert(np.asarray(mask).astype(bool))[..., 0]\n color_mask = np.array([[0, 0, 0], [31, 119, 180]], dtype=np.uint8)\n color_mask = color_mask[mask.astype(int)]\n show_img[mask] = cv2.addWeighted(show_img[mask], 0.5, color_mask[mask], 0.5, 0)\n show_img = Image.fromarray(show_img)\n show_img.save(save_path)" }, { "identifier": "merge_step_outputs", "path": "cryostar/utils/pl_utils.py", "snippet": "def merge_step_outputs(outputs):\n ks = outputs[0].keys()\n res = {}\n for k in ks:\n res[k] = torch.concat([out[k] for out in outputs], dim=0)\n return res" }, { "identifier": "squeeze_dict_outputs_1st_dim", "path": "cryostar/utils/pl_utils.py", "snippet": "def squeeze_dict_outputs_1st_dim(outputs):\n res = {}\n for k in outputs.keys():\n res[k] = outputs[k].flatten(start_dim=0, end_dim=1)\n return res" }, { "identifier": "filter_outputs_by_indices", "path": "cryostar/utils/pl_utils.py", "snippet": "def filter_outputs_by_indices(outputs, indices):\n res = {}\n for k in outputs.keys():\n res[k] = outputs[k][indices]\n return res" }, { "identifier": "get_1st_unique_indices", "path": "cryostar/utils/pl_utils.py", "snippet": "def get_1st_unique_indices(t):\n _, idx, counts = torch.unique(t, dim=None, sorted=True, return_inverse=True, return_counts=True)\n # ind_sorted: the index corresponding to same unique value will be grouped by these indices\n _, ind_sorted = torch.sort(idx, stable=True)\n cum_sum = counts.cumsum(0)\n cum_sum = torch.cat((cum_sum.new_tensor([\n 0,\n ]), cum_sum[:-1]))\n first_idx = ind_sorted[cum_sum]\n return first_idx" } ]
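The distance-restraint helpers in the context above (find_quaint_cutoff_pairs, calc_dist_by_pair_indices) reduce to cutoff selection on a pairwise distance map followed by per-pair norms. A small self-contained illustration with random stand-in CA coordinates:

import numpy as np
from scipy.spatial import distance

coords = np.random.rand(50, 3) * 30.0             # fake CA coordinates, in angstrom
dist_map = distance.cdist(coords, coords)          # (50, 50) pairwise Euclidean distances
sel_mask = np.triu(dist_map <= 12.0, k=1)          # keep each pair once, drop self-pairs
pair_ids = np.column_stack(np.nonzero(sel_mask))   # (num_pairs, 2) index pairs

# same computation as calc_dist_by_pair_indices: norm of the coordinate difference per pair
gt_dists = np.linalg.norm(np.diff(coords[pair_ids], axis=1), axis=-1).ravel()
print(pair_ids.shape, gt_dists.shape)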
import os.path as osp import warnings import collections import einops import numpy as np import biotite.structure as struc import torch import lightning.pytorch as pl from pathlib import Path from copy import deepcopy from torch import nn from torch import optim from torch.utils.data import DataLoader from torchinfo import summary from lightning.fabric.utilities.warnings import PossibleUserWarning from lightning.pytorch.utilities import rank_zero_only from lightning.pytorch.strategies import DDPStrategy from mmengine import mkdir_or_exist from cryostar.utils.transforms import SpatialGridTranslate from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig, Mask from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN from cryostar.utils.losses import calc_cor_loss, calc_kl_loss from cryostar.utils.misc import log_to_current, \ pl_init_exp, pretty_dict, set_seed, warmup from cryostar.utils.pdb_tools import bt_save_pdb from cryostar.gmm.gmm import EMAN2Grid, batch_projection, Gaussian from cryostar.gmm.deformer import E3Deformer, NMADeformer from cryostar.utils.fft_utils import primal_to_fourier_2d, fourier_to_primal_2d from cryostar.utils.polymer import Polymer, NT_ATOMS, AA_ATOMS from cryostar.utils.dist_loss import (find_quaint_cutoff_pairs, find_range_cutoff_pairs, find_continuous_pairs, calc_dist_by_pair_indices, remove_duplicate_pairs, filter_same_chain_pairs, DistLoss) from cryostar.utils.latent_space_utils import get_nearest_point, cluster_kmeans, run_pca, get_pc_traj, run_umap from cryostar.utils.vis_utils import plot_z_dist, save_tensor_image from cryostar.utils.pl_utils import merge_step_outputs, squeeze_dict_outputs_1st_dim, \ filter_outputs_by_indices, get_1st_unique_indices from miscs import calc_pair_dist_loss, calc_clash_loss, low_pass_mask2d, VAE, infer_ctf_params_from_config
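The centered FFT helpers imported above (primal_to_fourier_2d / fourier_to_primal_2d) follow an ifftshift -> fft -> fftshift convention and its inverse. A quick round-trip sanity check written directly against torch.fft:

import torch

img = torch.randn(2, 1, 64, 64)
f = torch.fft.fftshift(torch.fft.fftn(torch.fft.ifftshift(img, dim=(-2, -1)),
                                      dim=(-2, -1)), dim=(-2, -1))
back = torch.fft.fftshift(torch.fft.ifftn(torch.fft.ifftshift(f, dim=(-2, -1)),
                                          dim=(-2, -1)), dim=(-2, -1)).real
# forward then inverse transform should reproduce the image up to numerical noise
assert torch.allclose(img, back, atol=1e-4)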
16,578
# only gmm supervision should be low-passed if self.lp_mask2d is not None: lp_gt_images = self.low_pass_images(gt_images) else: lp_gt_images = gt_images gmm_proj_loss = calc_cor_loss(pred_gmm_images, lp_gt_images, self.mask) weighted_gmm_proj_loss = cfg.loss.gmm_cryoem_weight * gmm_proj_loss if hasattr(self, "connect_pairs"): connect_loss = calc_pair_dist_loss(pred_struc, self.connect_pairs, self.connect_dists) weighted_connect_loss = cfg.loss.connect_weight * connect_loss else: weighted_connect_loss = weighted_gmm_proj_loss.new_tensor(0.) if hasattr(self, "sse_pairs"): sse_loss = calc_pair_dist_loss(pred_struc, self.sse_pairs, self.sse_dists) weighted_sse_loss = cfg.loss.connect_weight * sse_loss else: weighted_sse_loss = weighted_gmm_proj_loss.new_tensor(0.) if hasattr(self, "dist_loss_fn"): dist_loss = self.dist_loss_fn(pred_struc) # across devices all_dist_loss = self.all_gather(dist_loss) # world_size, batch, num_pairs all_dist_loss = all_dist_loss.reshape(-1, dist_loss.shape[-1]) # chain-wise drop with torch.no_grad(): keep_mask = torch.ones(dist_loss.shape[-1], dtype=torch.bool).to(dist_loss.device) for i in range(len(self.cutoff_chain_mask)): tmp_mask = self.cutoff_chain_mask[i] tmp_var = all_dist_loss.index_select(dim=1, index=tmp_mask.nonzero(as_tuple=True)[0]).var(dim=0) intra_chain_keep_mask = tmp_var.lt(torch.quantile(tmp_var, cfg.loss.dist_keep_ratio)) keep_mask[tmp_mask] *= intra_chain_keep_mask keep_mask = keep_mask.unsqueeze(0).repeat(dist_loss.size(0), 1) dist_loss = torch.mean(dist_loss[keep_mask]) weighted_dist_loss = cfg.loss.dist_weight * dist_loss # dist_penalty = torch.mean(torch.abs(self.dist_loss_fn.get_weights())) # weighted_dist_penalty = cfg.loss.dist_penalty_weight * dist_penalty else: weighted_dist_loss = weighted_gmm_proj_loss.new_tensor(0.) # weighted_dist_penalty = weighted_gmm_proj_loss.new_tensor(0.) if hasattr(self, "clash_pairs"): clash_loss = calc_clash_loss(pred_struc, self.clash_pairs, cfg.loss.clash_min_cutoff) weighted_clash_loss = cfg.loss.clash_weight * clash_loss else: weighted_clash_loss = weighted_gmm_proj_loss.new_tensor(0.) 
# KL kl_loss = calc_kl_loss(mu, log_var, self.cfg.loss.free_bits) kl_beta = warmup(cfg.loss.warmup_step, upper=cfg.loss.kl_beta_upper)(self.global_step) weighted_kld_loss = kl_beta * kl_loss / self.mask.num_masked # clac loss loss = (weighted_kld_loss + weighted_gmm_proj_loss + weighted_connect_loss + weighted_dist_loss + weighted_sse_loss + weighted_clash_loss) tmp_metric = { "loss": loss.item(), "cryoem(gmm)": weighted_gmm_proj_loss.item(), "con": weighted_connect_loss.item(), "sse": weighted_sse_loss.item(), "dist": weighted_dist_loss.item(), # "dist_penalty": weighted_dist_penalty.item(), "clash": weighted_clash_loss.item(), "kld": weighted_kld_loss.item(), "kld(/dim)": kl_loss.item() } if self.global_step % cfg.runner.log_every_n_step == 0: self.log_dict(tmp_metric) log_to_current(f"epoch {self.current_epoch} [{batch_idx}/{self.trainer.num_training_batches}] | " + pretty_dict(tmp_metric, 5)) return loss def validation_step(self, batch, batch_idx): gt_images = batch["proj"] idxes = batch["idx"] # if self.lp_mask2d is not None: # gt_images = self.low_pass_images(gt_images) mu, log_var = self.model.encode(prepare_images(gt_images, self.cfg.model.input_space), idxes) z = mu self.validation_step_outputs.append({"z": z, "idx": idxes}) def on_validation_epoch_end(self): # lightning will automatically copy val samples to let val_loader to be divided by gpu_num with no remainder, # here use sample id to remove redundancy all_outputs = merge_step_outputs(self.validation_step_outputs) all_outputs = self.all_gather(all_outputs) all_outputs = squeeze_dict_outputs_1st_dim(all_outputs) if self.trainer.is_global_zero and len(all_outputs) > 0: # save projection images for checking self._shared_image_check() save_dir = self._get_save_dir() # -------- # dealing with all z indices = get_1st_unique_indices(all_outputs["idx"]) log_to_current(f"Total {len(indices)} unique samples") all_outputs = filter_outputs_by_indices(all_outputs, indices) z_list = all_outputs["z"] z_list = z_list.cpu().numpy() # (num_samples, latent_dim) np.save(osp.join(save_dir, "z.npy"), z_list) # -------- # Kmeans cluster kmeans_labels, centers = cluster_kmeans(z_list, self.cfg.analyze.cluster_k) centers, centers_ind = get_nearest_point(z_list, centers) if z_list.shape[-1] > 2 and not self.cfg.analyze.skip_umap: log_to_current("Running UMAP...")
# other # avoid num_workers set as cpu_count warning warnings.simplefilter("ignore", PossibleUserWarning) # only log to rank_zero, comment this for debugging log_to_current = rank_zero_only(log_to_current) TASK_NAME = "atom" def prepare_images(images: torch.FloatTensor, space: str): assert space in ("real", "fourier") if space == "real": model_input = einops.rearrange(images, "b 1 ny nx -> b (1 ny nx)") else: fimages = primal_to_fourier_2d(images) model_input = einops.rearrange(torch.view_as_real(fimages), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) return model_input class InitTask(pl.LightningModule): def __init__(self, em_module): super().__init__() self.cfg = em_module.cfg self.em_module = em_module self.loss_deque = collections.deque([ 10, ], maxlen=20) def on_train_batch_end(self, outputs, batch, batch_idx): self.loss_deque.append(outputs['loss'].item()) if np.mean(self.loss_deque) < 1e-3: self.trainer.should_stop = True # update all process status self.trainer.should_stop = self.trainer.strategy.broadcast(self.trainer.should_stop) def training_step(self, batch, batch_idx): images = batch["proj"] idxes = batch["idx"] rot_mats, trans_mats = self.em_module.get_batch_pose(batch) pred_deformation, mu, log_var = self.em_module.model(prepare_images(images, self.cfg.model.input_space), idxes, rot_mats) shift_loss = torch.mean(torch.pow(pred_deformation.flatten(start_dim=-2), 2)) loss = shift_loss if self.global_step % self.cfg.runner.log_every_n_step == 0: log_to_current(f"loss {loss.item()}") return loss def configure_optimizers(self): return optim.AdamW(self.em_module.model.parameters(), lr=1e-4) def on_fit_end(self): log_to_current(f"Init finished with loss {np.mean(self.loss_deque)}") class CryoEMTask(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() cfg = deepcopy(cfg) self.cfg = cfg # Define GMM meta = Polymer.from_pdb(cfg.dataset_attr.ref_pdb_path) log_to_current(f"Load reference structure from {cfg.dataset_attr.ref_pdb_path}") # for save self.template_pdb = meta.to_atom_arr() log_to_current(f"Protein contains {len(meta)} atoms, " f"{meta.num_amino_acids} amino acids, " f"{meta.num_nucleotides} nucleotides, " f"{meta.num_chains} chains.") # ref ref_centers = torch.from_numpy(meta.coord).float() ref_amps = torch.from_numpy(meta.num_electron).float() ref_sigmas = torch.ones_like(ref_amps) ref_sigmas.fill_(2.) 
log_to_current(f"1st GMM blob amplitude {ref_amps[0].item()}, sigma {ref_sigmas[0].item()}") num_pts = len(meta) log_to_current(f"Reference structure has {num_pts} atom coordinates") # tunable params # gmm self.register_buffer("gmm_centers", ref_centers) if cfg.gmm.tunable: log_to_current("Set GMM sigmas, amplitudes tunable") self.register_parameter("gmm_sigmas", nn.Parameter(ref_sigmas)) self.register_parameter("gmm_amps", nn.Parameter(ref_amps)) else: self.register_buffer("gmm_sigmas", ref_sigmas) self.register_buffer("gmm_amps", ref_amps) nma_modes = None if (hasattr(self.cfg.extra_input_data_attr, "nma_path") and self.cfg.extra_input_data_attr.nma_path not in ["", None]): nma_modes = torch.tensor(np.load(self.cfg.extra_input_data_attr.nma_path), dtype=torch.float32) log_to_current(f"Load NMA coefficients from {self.cfg.extra_input_data_attr.nma_path}, " f"whose shape is {nma_modes.shape}") # model if cfg.model.input_space == "fourier": in_dim = 2 * cfg.data_process.down_side_shape ** 2 elif cfg.model.input_space == "real": in_dim = cfg.data_process.down_side_shape ** 2 else: raise NotImplementedError self.model = VAE(in_dim=in_dim, out_dim=num_pts * 3 if nma_modes is None else 6 + nma_modes.shape[1], **cfg.model.model_cfg) log_to_current('Model summary:\n' + str(summary(self.model, input_size=[(1, in_dim), (1,)], verbose=0))) if nma_modes is None: self.deformer = E3Deformer() else: self.deformer = NMADeformer(nma_modes) # loss or regularization's preparation # dist loss connect_pairs = find_continuous_pairs(meta.chain_id, meta.res_id, meta.atom_name) if cfg.extra_input_data_attr.use_domain: log_to_current("use domain instead of chain!") domain_id = np.load(cfg.extra_input_data_attr.domain_path) cutoff_pairs = find_quaint_cutoff_pairs(meta.coord, domain_id, meta.res_id, cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) else: # deal with RNA/DNA if np.sum(np.isin(meta.atom_name, NT_ATOMS)): # aa tmp_mask = np.isin(meta.atom_name, AA_ATOMS) indices_in_pdb = np.nonzero(tmp_mask)[0] aa_cutoff_pairs = find_quaint_cutoff_pairs(meta.coord[tmp_mask], meta.chain_id[tmp_mask], meta.res_id[tmp_mask], cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) aa_cutoff_pairs = indices_in_pdb[aa_cutoff_pairs] log_to_current(f"{len(aa_cutoff_pairs)} AA pairs") # nt tmp_mask = np.isin(meta.atom_name, NT_ATOMS) indices_in_pdb = np.nonzero(tmp_mask)[0] nt_cutoff_pairs = find_quaint_cutoff_pairs(meta.coord[tmp_mask], meta.chain_id[tmp_mask], meta.res_id[tmp_mask], cfg.loss.nt_intra_chain_cutoff, cfg.loss.nt_inter_chain_cutoff, cfg.loss.nt_intra_chain_res_bound) nt_cutoff_pairs = indices_in_pdb[nt_cutoff_pairs] log_to_current(f"{len(nt_cutoff_pairs)} NT pairs") cutoff_pairs = np.vstack((aa_cutoff_pairs, nt_cutoff_pairs)) else: cutoff_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, connect_pairs) if cfg.loss.sse_weight != 0.0: log_to_current("use pseduo `sse` by building spatial/sequential edges") sse_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, 0, 20) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, sse_pairs) clash_pairs = find_range_cutoff_pairs(meta.coord, cfg.loss.clash_min_cutoff) clash_pairs = remove_duplicate_pairs(clash_pairs, connect_pairs) if len(connect_pairs) > 0: self.register_buffer("connect_pairs", 
torch.from_numpy(connect_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, connect_pairs) self.register_buffer("connect_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(connect_pairs)} connect_pairs") else: log_to_current("connect_pairs is empty") if cfg.loss.sse_weight != 0.0: self.register_buffer("sse_pairs", torch.from_numpy(sse_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, sse_pairs) self.register_buffer("sse_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(sse_pairs)} sse_pairs") if len(cutoff_pairs) > 0: dists = calc_dist_by_pair_indices(meta.coord, cutoff_pairs) log_to_current(f"found {len(cutoff_pairs)} cutoff_pairs") self.dist_loss_fn = DistLoss(cutoff_pairs, dists, reduction=None) # for chain-wise dropout cutoff_chain_mask = filter_same_chain_pairs(cutoff_pairs, meta.chain_id) self.register_buffer("cutoff_chain_mask", torch.from_numpy(cutoff_chain_mask)) else: log_to_current("cutoff_pairs is empty") if len(clash_pairs) > 0: self.register_buffer("clash_pairs", torch.from_numpy(clash_pairs).long()) log_to_current(f"found {len(clash_pairs)} clash_pairs") else: log_to_current("clash_pairs is empty") # low-pass filtering if hasattr(cfg.data_process, "low_pass_bandwidth"): log_to_current(f"Use low-pass filtering w/ {cfg.data_process.low_pass_bandwidth} A") lp_mask2d = low_pass_mask2d(cfg.data_process.down_side_shape, cfg.data_process.down_apix, cfg.data_process.low_pass_bandwidth) self.register_buffer("lp_mask2d", torch.from_numpy(lp_mask2d).float()) else: self.lp_mask2d = None # self.mask = Mask(cfg.data_process.down_side_shape, rad=cfg.loss.mask_rad_for_image_loss) # for projection grid = EMAN2Grid(side_shape=cfg.data_process.down_side_shape, voxel_size=cfg.data_process.down_apix) self.grid = grid ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) # translate image helper self.translator = SpatialGridTranslate(D=cfg.data_process.down_side_shape, device=self.device) self.apix = self.cfg.data_process.down_apix # cache self.validation_step_outputs = [] self.stored_metrics = {} self.history_saved_dirs = [] if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") self._load_ckpt(self.cfg.extra_input_data_attr.ckpt_path) def _save_ckpt(self, ckpt_path): torch.save( { "model": self.model.state_dict(), "gmm_sigmas": self.gmm_sigmas.data, "gmm_amps": self.gmm_amps.data }, ckpt_path) def _load_ckpt(self, ckpt_path): state_dict = torch.load(ckpt_path, map_location=self.device) self.model.load_state_dict(state_dict["model"]) if self.cfg.gmm.tunable: self.gmm_sigmas.data = state_dict["gmm_sigmas"] self.gmm_amps.data = state_dict["gmm_amps"] def _get_save_dir(self): save_dir = osp.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def low_pass_images(self, images): f_images = primal_to_fourier_2d(images) f_images = f_images * self.lp_mask2d images = fourier_to_primal_2d(f_images).real return images def get_batch_pose(self, batch): rot_mats = batch["rotmat"] # yx order trans_mats = torch.concat((batch["shiftY"].unsqueeze(1), 
batch["shiftX"].unsqueeze(1)), dim=1) trans_mats /= self.apix return rot_mats, trans_mats def _shared_forward(self, images, idxes, rots): # predict structure pred_deformation, mu, log_var = self.model(prepare_images(images, self.cfg.model.input_space), idxes, rots) return pred_deformation, mu, log_var def _shared_projection(self, pred_struc, rot_mats): pred_images = batch_projection( gauss=Gaussian( mus=pred_struc, sigmas=self.gmm_sigmas.unsqueeze(0), # (b, num_centers) amplitudes=self.gmm_amps.unsqueeze(0)), rot_mats=rot_mats, line_grid=self.grid.line()) pred_images = einops.rearrange(pred_images, 'b y x -> b 1 y x') return pred_images def _apply_ctf(self, batch, real_proj, freq_mask=None): f_proj = primal_to_fourier_2d(real_proj) f_proj = self._apply_ctf_f(batch, f_proj, freq_mask) # Note: here only use the real part proj = fourier_to_primal_2d(f_proj).real return proj def _apply_ctf_f(self, batch, f_proj, freq_mask=None): pred_ctf_params = {k: batch[k] for k in ('defocusU', 'defocusV', 'angleAstigmatism') if k in batch} f_proj = self.ctf(f_proj, batch['idx'], ctf_params=pred_ctf_params, mode="gt", frequency_marcher=None) if freq_mask is not None: f_proj = f_proj * self.lp_mask2d return f_proj def _shared_infer(self, batch): gt_images = batch["proj"] idxes = batch["idx"] rot_mats, trans_mats = self.get_batch_pose(batch) # if self.lp_mask2d is not None: # gt_images = self.low_pass_images(gt_images) # prediction pred_deformation, mu, log_var = self._shared_forward(gt_images, idxes, rot_mats) pred_struc = self.deformer.transform(pred_deformation, self.gmm_centers) # get gmm projections pred_gmm_images = self._shared_projection(pred_struc, rot_mats) # apply ctf, low-pass pred_gmm_images = self._apply_ctf(batch, pred_gmm_images, self.lp_mask2d) if trans_mats is not None: gt_images = self.translator.transform(einops.rearrange(gt_images, "B 1 NY NX -> B NY NX"), einops.rearrange(trans_mats, "B C2 -> B 1 C2")) return gt_images, pred_gmm_images, pred_struc, mu, log_var def _shared_decoding(self, z): with torch.no_grad(): z = z.float().to(self.device) pred_deformation = self.model.decoder(z) pred_struc = self.deformer.transform(pred_deformation, self.gmm_centers) pred_struc = pred_struc.squeeze(0) return pred_struc def _save_batched_strucs(self, pred_strucs, save_path): ref_atom_arr = self.template_pdb.copy() atom_arrs = [] b = pred_strucs.shape[0] for i in range(b): tmp_struc = pred_strucs[i].cpu().numpy() tmp_atom_arr = ref_atom_arr.copy() tmp_atom_arr.coord = tmp_struc atom_arrs.append(tmp_atom_arr) bt_save_pdb(save_path, struc.stack(atom_arrs)) def _shared_image_check(self, total=25): mode = self.model.training # use validation or test set which not shuffled tmp_loader = self.trainer.val_dataloaders or self.trainer.test_dataloaders num = 0 gt_images_list = [] pred_gmm_images_list = [] self.model.eval() with torch.no_grad(): for batch in tmp_loader: batch = self.trainer.strategy.batch_to_device(batch) gt_images, pred_gmm_images, _, mu, log_var = self._shared_infer(batch) gt_images_list.append(gt_images) pred_gmm_images_list.append(pred_gmm_images) num += gt_images.shape[0] if num >= total: break self.model.train(mode=mode) gt_images_list = torch.cat(gt_images_list, dim=0)[:total] pred_gmm_images_list = torch.cat(pred_gmm_images_list, dim=0)[:total] save_dir = self._get_save_dir() save_tensor_image(gt_images_list, osp.join(save_dir, "input_image.png")) save_tensor_image(pred_gmm_images_list, osp.join(save_dir, "pred_gmm_image.png"), self.mask.mask) # standard hooks: def training_step(self, 
batch, batch_idx): cfg = self.cfg gt_images, pred_gmm_images, pred_struc, mu, log_var = self._shared_infer(batch) # gmm part loss # only gmm supervision should be low-passed if self.lp_mask2d is not None: lp_gt_images = self.low_pass_images(gt_images) else: lp_gt_images = gt_images gmm_proj_loss = calc_cor_loss(pred_gmm_images, lp_gt_images, self.mask) weighted_gmm_proj_loss = cfg.loss.gmm_cryoem_weight * gmm_proj_loss if hasattr(self, "connect_pairs"): connect_loss = calc_pair_dist_loss(pred_struc, self.connect_pairs, self.connect_dists) weighted_connect_loss = cfg.loss.connect_weight * connect_loss else: weighted_connect_loss = weighted_gmm_proj_loss.new_tensor(0.) if hasattr(self, "sse_pairs"): sse_loss = calc_pair_dist_loss(pred_struc, self.sse_pairs, self.sse_dists) weighted_sse_loss = cfg.loss.connect_weight * sse_loss else: weighted_sse_loss = weighted_gmm_proj_loss.new_tensor(0.) if hasattr(self, "dist_loss_fn"): dist_loss = self.dist_loss_fn(pred_struc) # across devices all_dist_loss = self.all_gather(dist_loss) # world_size, batch, num_pairs all_dist_loss = all_dist_loss.reshape(-1, dist_loss.shape[-1]) # chain-wise drop with torch.no_grad(): keep_mask = torch.ones(dist_loss.shape[-1], dtype=torch.bool).to(dist_loss.device) for i in range(len(self.cutoff_chain_mask)): tmp_mask = self.cutoff_chain_mask[i] tmp_var = all_dist_loss.index_select(dim=1, index=tmp_mask.nonzero(as_tuple=True)[0]).var(dim=0) intra_chain_keep_mask = tmp_var.lt(torch.quantile(tmp_var, cfg.loss.dist_keep_ratio)) keep_mask[tmp_mask] *= intra_chain_keep_mask keep_mask = keep_mask.unsqueeze(0).repeat(dist_loss.size(0), 1) dist_loss = torch.mean(dist_loss[keep_mask]) weighted_dist_loss = cfg.loss.dist_weight * dist_loss # dist_penalty = torch.mean(torch.abs(self.dist_loss_fn.get_weights())) # weighted_dist_penalty = cfg.loss.dist_penalty_weight * dist_penalty else: weighted_dist_loss = weighted_gmm_proj_loss.new_tensor(0.) # weighted_dist_penalty = weighted_gmm_proj_loss.new_tensor(0.) if hasattr(self, "clash_pairs"): clash_loss = calc_clash_loss(pred_struc, self.clash_pairs, cfg.loss.clash_min_cutoff) weighted_clash_loss = cfg.loss.clash_weight * clash_loss else: weighted_clash_loss = weighted_gmm_proj_loss.new_tensor(0.) 
# KL kl_loss = calc_kl_loss(mu, log_var, self.cfg.loss.free_bits) kl_beta = warmup(cfg.loss.warmup_step, upper=cfg.loss.kl_beta_upper)(self.global_step) weighted_kld_loss = kl_beta * kl_loss / self.mask.num_masked # clac loss loss = (weighted_kld_loss + weighted_gmm_proj_loss + weighted_connect_loss + weighted_dist_loss + weighted_sse_loss + weighted_clash_loss) tmp_metric = { "loss": loss.item(), "cryoem(gmm)": weighted_gmm_proj_loss.item(), "con": weighted_connect_loss.item(), "sse": weighted_sse_loss.item(), "dist": weighted_dist_loss.item(), # "dist_penalty": weighted_dist_penalty.item(), "clash": weighted_clash_loss.item(), "kld": weighted_kld_loss.item(), "kld(/dim)": kl_loss.item() } if self.global_step % cfg.runner.log_every_n_step == 0: self.log_dict(tmp_metric) log_to_current(f"epoch {self.current_epoch} [{batch_idx}/{self.trainer.num_training_batches}] | " + pretty_dict(tmp_metric, 5)) return loss def validation_step(self, batch, batch_idx): gt_images = batch["proj"] idxes = batch["idx"] # if self.lp_mask2d is not None: # gt_images = self.low_pass_images(gt_images) mu, log_var = self.model.encode(prepare_images(gt_images, self.cfg.model.input_space), idxes) z = mu self.validation_step_outputs.append({"z": z, "idx": idxes}) def on_validation_epoch_end(self): # lightning will automatically copy val samples to let val_loader to be divided by gpu_num with no remainder, # here use sample id to remove redundancy all_outputs = merge_step_outputs(self.validation_step_outputs) all_outputs = self.all_gather(all_outputs) all_outputs = squeeze_dict_outputs_1st_dim(all_outputs) if self.trainer.is_global_zero and len(all_outputs) > 0: # save projection images for checking self._shared_image_check() save_dir = self._get_save_dir() # -------- # dealing with all z indices = get_1st_unique_indices(all_outputs["idx"]) log_to_current(f"Total {len(indices)} unique samples") all_outputs = filter_outputs_by_indices(all_outputs, indices) z_list = all_outputs["z"] z_list = z_list.cpu().numpy() # (num_samples, latent_dim) np.save(osp.join(save_dir, "z.npy"), z_list) # -------- # Kmeans cluster kmeans_labels, centers = cluster_kmeans(z_list, self.cfg.analyze.cluster_k) centers, centers_ind = get_nearest_point(z_list, centers) if z_list.shape[-1] > 2 and not self.cfg.analyze.skip_umap: log_to_current("Running UMAP...")
z_emb, reducer = run_umap(z_list)
31
2023-11-06 07:15:26+00:00
24k
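For reference, the on_validation_epoch_end hook in the record above removes the duplicate samples that DDP padding introduces by keeping only the first occurrence of each gathered sample id (get_1st_unique_indices, then filter_outputs_by_indices). A minimal standalone sketch of that first-occurrence pattern follows; it reuses the helper exactly as defined in the context snippet, and the id tensor is a hypothetical stand-in for the gathered batch indices, not data from the repository:

import torch

def get_1st_unique_indices(t):
    # identical logic to cryostar.utils.pl_utils.get_1st_unique_indices shown above
    _, idx, counts = torch.unique(t, dim=None, sorted=True, return_inverse=True, return_counts=True)
    _, ind_sorted = torch.sort(idx, stable=True)
    cum_sum = counts.cumsum(0)
    cum_sum = torch.cat((cum_sum.new_tensor([0, ]), cum_sum[:-1]))
    return ind_sorted[cum_sum]

# hypothetical gathered sample ids: DDP padding repeats ids 3 and 0
idx = torch.tensor([3, 0, 1, 1, 2, 3, 0])
z = torch.randn(7, 8)                      # one latent vector per gathered (possibly duplicated) sample

first_idx = get_1st_unique_indices(idx)    # tensor([1, 2, 4, 0]): first occurrence of ids 0, 1, 2, 3
unique_z = z[first_idx]                    # one latent per unique sample id, ready for k-means / UMAP
unique_ids = idx[first_idx]                # tensor([0, 1, 2, 3])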
KAIST-AILab/palr
train.py
[ { "identifier": "BC", "path": "imitation/bc.py", "snippet": "class BC(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, envname=None, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, stacksize=1, standardize=True):\n \n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(BC, self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid\n self.device = device\n \n self.obs_dim = obs_dim\n self.action_dim = action_dim \n self.stacksize = stacksize\n \n self.policy_optimizer = optim.Adam(policy.parameters(), lr=lr)\n \n self.num_eval_iteration = 50\n self.envname = envname\n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path \n \n # For standardization\n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std\n \n\n def train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000):\n \n max_score = -100000.\n \n batch_valid = self.replay_buffer_valid.random_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations']\n actions_valid = batch_valid['actions'][:, -self.action_dim:] \n prev_expert_action_valid = batch_valid['actions'][:, :-self.action_dim] # For debugging\n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_expert_action_valid = torch.tensor(prev_expert_action_valid, dtype=torch.float32, device=self.device)\n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device) \n\n neg_likelihood = -self.policy.log_prob(obs, actions).mean()\n train_loss = neg_likelihood\n \n self.policy_optimizer.zero_grad()\n train_loss.backward()\n self.policy_optimizer.step()\n\n if (num+1) % eval_freq == 0:\n policy_action = self.policy(obs).sample()\n policy_action_valid = self.policy(obs_valid).sample()\n prev_expert_action = batch['actions'][:, :-self.action_dim] \n prev_expert_action = torch.tensor(prev_expert_action, dtype=torch.float32, device=self.device) \n \n # Train data HSCIC (for debugging) \n policy_embedding = self.policy.forward_embedding(obs)\n if self.standardize:\n Y_std = (prev_expert_action - self.act_mean_tt[0, :-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n p_std = (policy_action - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n 
p_std = p_std.to(torch.float32)\n else:\n Y_std = prev_expert_action\n Z_std = actions\n p_std = policy_action\n \n hscic_estimate = estimate_hscic(X=policy_embedding, Y=Y_std, Z=Z_std, ridge_lambda=1e-5)\n \n policy_embedding_valid = self.policy.forward_embedding(obs_valid)\n if self.standardize:\n Y_std = (prev_expert_action_valid - self.act_mean_tt[0, :-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions_valid - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n else:\n Y_std = prev_expert_action_valid\n Z_std = actions_valid\n p_std = policy_action\n \n valid_hscic_estimate = estimate_hscic(X=policy_embedding_valid, Y=Y_std, Z=Z_std, ridge_lambda=1e-5)\n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_expert_action_valid, Z=actions_valid, ridge_lambda=1e-5)\n\n valid_neg_likelihood = -self.policy.log_prob(obs_valid, actions_valid).mean()\n valid_loss = valid_neg_likelihood\n\n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n print(f'** iter{num+1}: train_policy_loss={train_loss.item():.2f}, val_policy_loss={valid_loss.item():.2f}, eval_ret={eval_ret_mean:.2f}+-{eval_ret_std:.2f} ({obs_valid.shape[0]})',)\n print(f'** HSCIC : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n \n if self.wandb:\n self.wandb.log({'train_total_loss': train_loss.item(), \n 'valid_total_loss': valid_loss.item(),\n 'train_neg_likelihood': neg_likelihood.item(),\n 'valid_neg_likelihood': valid_neg_likelihood.item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score record! 
')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep:\n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()\n \n next_obs, rew, done, _ = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "RAP", "path": "imitation/rap.py", "snippet": "class RAP(nn.Module):\n # Implementation of Residual Action Prediction (ECCV 2022)\n # - https://arxiv.org/pdf/2207.09705.pdf\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, embedding_dim=1, stacksize=1, standardize=False\n ):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(RAP, self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid\n \n self.device = device\n \n self.m_embedding_optimizer = optim.Adam(policy.history_embedding_params, lr=lr)\n self.h_embedding_optimizer = optim.Adam(policy.single_embedding_params, lr=lr)\n self.policy_optimizer = optim.Adam(policy.policy_params, lr=lr)\n self.residual_optimizer = optim.Adam(policy.residual_params, lr=lr)\n\n self.num_eval_iteration = 50 \n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path\n\n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.embedding_dim = embedding_dim\n self.stacksize = stacksize\n \n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std\n \n\n def train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000):\n \n max_score = -100000. \n min_loss = 100000. 
\n \n batch_valid = self.replay_buffer_valid.get_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations']\n actions_valid = batch_valid['actions'][:, -self.action_dim:]\n prev_actions_valid = batch_valid['actions'][:, :-self.action_dim] \n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_actions_valid = torch.tensor(prev_actions_valid, dtype=torch.float32, device=self.device) \n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n\n self.m_embedding_optimizer.zero_grad()\n self.residual_optimizer.zero_grad() \n \n # m : history embedding, h : single observation embedding\n m, _ = self.policy.forward_embedding(obs) \n action_residuals = actions - prev_actions\n action_residual_pred = self.policy.forward_residual_from_m(m)\n \n train_residual_loss = torch.mean((action_residual_pred - action_residuals) ** 2)\n train_residual_loss.backward()\n \n self.m_embedding_optimizer.step()\n self.residual_optimizer.step() \n \n self.policy_optimizer.zero_grad() \n self.h_embedding_optimizer.zero_grad() \n \n m, h = self.policy.forward_embedding(obs)\n \n # we follow the original implementation that stop-gradient layer on m ; \n # see `forward_policy_from_embedding` method for detail. 
(m.detach() in input)\n train_neg_likelihood = -self.policy.log_prob_policy_from_m_h(m, h, actions).mean()\n train_neg_likelihood.backward()\n \n self.policy_optimizer.step()\n self.h_embedding_optimizer.step()\n \n if (num+1) % eval_freq == 0: \n valid_m, valid_h = self.policy.forward_embedding(obs_valid) \n valid_action_residuals = actions_valid - prev_actions_valid\n valid_action_residual_pred = self.policy.forward_residual_from_m(valid_m)\n \n valid_policy_neg_likelihood = -self.policy.log_prob_policy_from_m_h(valid_m, valid_h, actions_valid).mean()\n valid_residual_loss = torch.mean((valid_action_residual_pred - valid_action_residuals) ** 2) \n \n valid_loss = valid_policy_neg_likelihood + valid_residual_loss\n \n policy_action_valid = self.policy(obs_valid).sample() \n \n train_mh = torch.cat([m,h], dim=-1)\n valid_mh = torch.cat([valid_m, valid_h], dim=-1)\n \n hscic_estimate = estimate_hscic(X=train_mh, Y=prev_actions, Z=actions, ridge_lambda=1e-5)\n valid_hscic_estimate = estimate_hscic(X=valid_mh, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5) \n train_hscic_m_a_given_aprev = estimate_hscic(X=m, Y=actions, Z=prev_actions, ridge_lambda=1e-5)\n valid_hscic_m_a_given_aprev = estimate_hscic(X=valid_m, Y=actions_valid, Z=prev_actions_valid, ridge_lambda=1e-5)\n \n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n train_loss = train_neg_likelihood + train_residual_loss\n \n print(f'** iter{num+1}: train_loss={train_loss.item()}, nll={train_neg_likelihood}, residual_loss={train_residual_loss}, eval_ret={eval_ret_mean}+-{eval_ret_std}')\n print(f' valid_loss={valid_loss.item()}, valid_nll={valid_policy_neg_likelihood}, valid_residual_loss={valid_residual_loss}')\n \n print(f'** HSCIC(mh, a_prev | a_current) : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n print(f'** HSCIC(m, a_current | a_prev) : (train){train_hscic_m_a_given_aprev:.6f} (valid){valid_hscic_m_a_given_aprev:.6f} ')\n \n if self.wandb:\n self.wandb.log({\n 'train_total_loss': train_loss.item(),\n 'valid_total_loss': valid_loss.item(),\n 'train_neg_likelihood': train_neg_likelihood.item(),\n 'valid_neg_likelihood': valid_policy_neg_likelihood.item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'train_residual_loss': train_residual_loss,\n 'valid_residual_loss': valid_residual_loss,\n 'train_mean_hscic(m,target|prev)': train_hscic_m_a_given_aprev,\n 'valid_mean_hscic(m,target|prev)': valid_hscic_m_a_given_aprev,\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score ')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in 
range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep:\n # obs = obs[:true_obs_dim]\n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()[0]\n next_obs, rew, done, env_info = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "FCA", "path": "imitation/fca.py", "snippet": "class FCA(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, stacksize=1, standardize=True,\n embedding_dim=1, entropy_hidden_size=300, entropy_lr=1e-4, reg_coef=1e-5, info_bottleneck_loss_coef=0.001, \n ):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(FCA, self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid \n self.device = device\n \n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.embedding_dim = embedding_dim\n self.stacksize = stacksize \n\n # Additional Network for Conditional Entropy (FCA)\n self.entropy_input_size = embedding_dim + action_dim\n self.entropy_hidden_size = entropy_hidden_size\n self.entropy_net = nn.Sequential(\n nn.Linear(self.entropy_input_size, self.entropy_hidden_size, device=self.device),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(self.entropy_hidden_size, action_dim, device=self.device)\n )\n \n # FCA Hyperparameters\n self.entropy_coef = reg_coef \n self.info_bottleneck_loss_coef = info_bottleneck_loss_coef \n \n self.embedding_optimizer = optim.Adam(policy.embedding_params, lr=lr)\n self.policy_optimizer = optim.Adam(policy.policy_params, lr=lr)\n self.entropy_optimizer = optim.Adam(self.entropy_net.parameters(), lr=entropy_lr)\n\n self.num_eval_iteration = 50\n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path\n\n # For standardization\n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std \n\n def train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000, inner_steps=1):\n \n max_score = -100000. \n min_loss = 100000. 
\n \n batch_valid = self.replay_buffer_valid.get_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations']\n actions_valid = batch_valid['actions'][:, -self.action_dim:]\n prev_actions_valid = batch_valid['actions'][:, :-self.action_dim] \n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_actions_valid = torch.tensor(prev_actions_valid, dtype=torch.float32, device=self.device) \n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n\n # conditional entropy input : H(a_{t-1}| a_{t}, varphi_t)\n h = self.policy.forward_embedding(obs)\n expert_action_and_h = torch.cat([actions, h], dim=-1) \n \n self.policy_optimizer.zero_grad()\n self.embedding_optimizer.zero_grad()\n self.entropy_optimizer.zero_grad()\n\n if self.entropy_coef > 0.:\n neg_likelihood = -self.policy.log_prob_policy_from_embedding(h, actions).mean()\n info_bottleneck_loss = 0.5 * (h ** 2).sum()\n\n # prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n pred_prev_actions = self.entropy_net(expert_action_and_h) \n entropy_loss = torch.mean((pred_prev_actions - prev_actions) ** 2) \n\n train_loss = neg_likelihood \\\n - self.entropy_coef * entropy_loss \\\n + self.info_bottleneck_loss_coef * info_bottleneck_loss\n \n train_loss.backward() # backprop embedding\n \n self.policy_optimizer.step()\n self.embedding_optimizer.step()\n\n # conditional entropy training\n for _ in range(inner_steps):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device) \n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n \n h = self.policy.forward_embedding(obs)\n expert_action_and_h = torch.cat([actions, h], dim=-1) \n\n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device) \n pred_prev_actions = self.entropy_net(expert_action_and_h.detach())\n\n entropy_loss = torch.mean((pred_prev_actions - prev_actions) ** 2)\n \n self.entropy_optimizer.zero_grad()\n entropy_loss.backward()\n self.entropy_optimizer.step()\n\n else:\n neg_likelihood = -self.policy.log_prob_policy_from_embedding(h, actions).mean()\n info_bottleneck_loss = 0.5 * (h ** 2).sum()\n \n train_loss = neg_likelihood + self.info_bottleneck_loss_coef * info_bottleneck_loss \n \n train_loss.backward()\n \n self.policy_optimizer.step()\n self.embedding_optimizer.step() \n \n\n if (num+1) % eval_freq == 0: \n h_valid = self.policy.forward_embedding(obs_valid)\n valid_info_bottleneck_loss = 0.5 * (h_valid ** 2).sum()\n \n if self.entropy_coef > 0:\n expert_action_and_h_valid = torch.cat([actions_valid, h_valid], dim=-1) \n pred_prev_actions_valid = self.entropy_net(expert_action_and_h_valid)\n \n prev_actions_valid = batch_valid['actions'][:, :-self.action_dim]\n prev_actions_valid = 
torch.tensor(prev_actions_valid, dtype=torch.float32, device=self.device)\n \n valid_entropy_loss = torch.mean((pred_prev_actions_valid - prev_actions_valid) ** 2)\n else:\n valid_entropy_loss = 0.\n \n valid_neg_likelihood = - self.policy.log_prob(obs_valid, actions_valid).mean()\n \n valid_loss = valid_neg_likelihood \\\n - self.entropy_coef * valid_entropy_loss \\\n + self.info_bottleneck_loss_coef * valid_info_bottleneck_loss\n \n policy_action_valid = self.policy(obs_valid).sample() \n h_train = self.policy.forward_embedding(obs)\n \n hscic_estimate = estimate_hscic(X=h_train, Y=prev_actions, Z=actions, ridge_lambda=1e-5)\n valid_hscic_estimate = estimate_hscic(X=h_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n \n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n print(f'** iter{num+1}: entropy_loss={entropy_loss}, train_loss={train_loss.item()}, eval_ret={eval_ret_mean}+-{eval_ret_std} ')\n print(f'** HSCIC : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n \n if self.wandb:\n self.wandb.log({\n 'train_total_loss': train_loss.item(), \n 'valid_total_loss': valid_loss.item(),\n 'train_neg_likelihood': neg_likelihood.item(), \n 'valid_neg_likelihood': valid_neg_likelihood.item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'valid_entropy_loss': entropy_loss, \n 'valid_IB_loss': info_bottleneck_loss.item(),\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score record! 
')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep: \n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()\n \n next_obs, rew, done, _ = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "MINE_BC", "path": "imitation/mine.py", "snippet": "class MINE_BC(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, stacksize=1, standardize=True,\n embedding_dim=1, mine_lr=1e-4, reg_coef=1e-5, info_bottleneck_loss_coef=0.001, \n ):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(MINE_BC, self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid\n self.device = device\n \n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.embedding_dim = embedding_dim\n self.stacksize = stacksize\n \n # Additional Network for MINE Neural Estimator\n self.mine = MINE_DV(action_dim, action_dim + embedding_dim, device=device)\n \n # MINE-BC Hyperparameters\n self.reg_coef = reg_coef\n self.info_bottleneck_loss_coef = info_bottleneck_loss_coef\n\n self.embedding_optimizer = optim.Adam(policy.embedding_params, lr=lr)\n self.policy_optimizer = optim.Adam(policy.policy_params, lr=lr)\n self.mine_optimizer = optim.Adam(self.mine.parameters(), lr=mine_lr)\n \n self.num_eval_iteration = 50\n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path\n\n # For standardization \n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std\n \n def 
train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000, inner_steps=1):\n \n min_loss = 100000.\n max_score = -100000.\n \n batch_valid = self.replay_buffer_valid.get_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations']\n actions_valid = batch_valid['actions'][:, -self.action_dim:]\n prev_actions_valid = batch_valid['actions'][:, :-self.action_dim] \n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_actions_valid = torch.tensor(prev_actions_valid, dtype=torch.float32, device=self.device)\n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n\n # MINE : I (a_{t-1}; a_{t}, varphi_t)\n h = self.policy.forward_embedding(obs)\n expert_action_and_h = torch.cat([actions, h], dim=-1)\n \n self.policy_optimizer.zero_grad()\n self.embedding_optimizer.zero_grad()\n self.mine_optimizer.zero_grad()\n\n if self.reg_coef > 0:\n neg_likelihood = -self.policy.log_prob_policy_from_embedding(h, actions).mean()\n info_bottleneck_loss = 0.5 * (h ** 2).sum()\n mi_estimate = self.mine.get_mi_bound(prev_actions, expert_action_and_h, update_ema=False)\n\n train_loss = neg_likelihood \\\n + self.reg_coef * mi_estimate \\\n + self.info_bottleneck_loss_coef * info_bottleneck_loss\n \n train_loss.backward()\n \n self.policy_optimizer.step()\n self.embedding_optimizer.step()\n\n # MINE training\n for _ in range(inner_steps):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n\n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n \n h = self.policy.forward_embedding(obs)\n expert_action_and_h = torch.cat([actions, h], dim=-1)\n \n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n \n mine_loss = -self.mine.get_mi_bound(prev_actions, expert_action_and_h.detach(), update_ema=True)\n\n self.mine_optimizer.zero_grad()\n mine_loss.backward()\n self.mine_optimizer.step()\n\n else:\n neg_likelihood = -self.policy.log_prob_policy_from_embedding(h, actions).mean()\n info_bottleneck_loss = 0.5 * (h ** 2).sum()\n \n train_loss = neg_likelihood + self.info_bottleneck_loss_coef * info_bottleneck_loss \n \n train_loss.backward()\n \n self.policy_optimizer.step()\n self.embedding_optimizer.step()\n \n\n if (num+1) % eval_freq == 0:\n h_valid = self.policy.forward_embedding(obs_valid)\n valid_info_bottleneck_loss = 0.5 * (h_valid ** 2).sum()\n \n if self.reg_coef > 0:\n expert_action_and_h_valid = torch.cat([actions_valid, h_valid], dim=-1) \n valid_mi_estimate = self.mine.get_mi_bound(prev_actions_valid, expert_action_and_h_valid, update_ema=False)\n else:\n valid_mi_estimate = 0.\n \n valid_neg_likelihood = -self.policy.log_prob(obs_valid, actions_valid).mean()\n\n valid_loss = valid_neg_likelihood \\\n + 
self.reg_coef * valid_mi_estimate \\\n + self.info_bottleneck_loss_coef * valid_info_bottleneck_loss\n \n policy_action_valid = self.policy(obs_valid).sample() \n h_train = self.policy.forward_embedding(obs)\n \n hscic_estimate = estimate_hscic(X=h_train, Y=prev_actions, Z=actions, ridge_lambda=1e-5)\n valid_hscic_estimate = estimate_hscic(X=h_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n \n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n print(f'** iter{num+1}: mine_loss={-mi_estimate.cpu().item()}, train_loss={train_loss.item()}, eval_ret={eval_ret_mean}+-{eval_ret_std} ')\n print(f'** HSCIC : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n \n if self.wandb:\n self.wandb.log({\n 'train_total_loss': train_loss.cpu().item(),\n 'valid_total_loss': valid_loss.cpu().item(),\n 'train_neg_likelihood': neg_likelihood.cpu().item(),\n 'valid_neg_likelihood': valid_neg_likelihood.cpu().item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'valid_mine_loss': -mi_estimate.cpu().item(),\n 'valid_IB_loss': info_bottleneck_loss.cpu().item(),\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score record! ')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep: \n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()\n next_obs, rew, done, _ = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "PALR", "path": "imitation/palr.py", "snippet": "class PALR(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, stacksize=1, standardize=True,\n reg_coef=0.01, ridge_lambda=1e-3):\n \n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(PALR, 
self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid\n self.device = device\n\n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.stacksize = stacksize\n \n self.policy_optimizer = optim.Adam(self.policy.parameters(), lr=lr) \n \n self.num_eval_iteration = 50\n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path\n \n # HSCIC Hyperparameters\n self.reg_coef = reg_coef\n self.ridge_lambda = ridge_lambda\n \n # For standardization\n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std\n \n\n def train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000):\n \n min_loss = 100000.\n max_score = -100000.\n \n batch_valid = self.replay_buffer_valid.get_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations'] \n actions_valid = batch_valid['actions'][:, -self.action_dim:]\n prev_expert_action_valid = batch_valid['actions'][:, :-self.action_dim]\n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_expert_action_valid = torch.tensor(prev_expert_action_valid, dtype=torch.float32, device=self.device)\n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n\n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_expert_action = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n prev_expert_action = torch.tensor(prev_expert_action, dtype=torch.float32, device=self.device)\n\n neg_likelihood = - self.policy.log_prob(obs, actions).mean() \n policy_action = self.policy(obs).rsample()\n \n if self.reg_coef != 0: \n policy_embedding = self.policy.forward_embedding(obs)\n if self.standardize:\n Y_std = (prev_expert_action - self.act_mean_tt[0, :-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n else:\n Y_std = prev_expert_action\n Z_std = actions\n \n hscic_estimate = estimate_hscic(X=policy_embedding, Y=Y_std, Z=Z_std, ridge_lambda=self.ridge_lambda)\n \n else:\n hscic_estimate = 0.\n \n train_loss = neg_likelihood + self.reg_coef * hscic_estimate \n\n self.policy_optimizer.zero_grad()\n train_loss.backward()\n self.policy_optimizer.step()\n\n if (num+1) % eval_freq == 0:\n policy_action = self.policy(obs).sample()\n policy_action_valid = self.policy(obs_valid).sample()\n \n # Train data HSCIC (for debugging) \n policy_embedding = self.policy.forward_embedding(obs)\n if self.standardize:\n Y_std = (prev_expert_action - self.act_mean_tt[0, 
:-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n p_std = (policy_action - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n p_std = p_std.to(torch.float32)\n \n else:\n Y_std = prev_expert_action\n Z_std = actions\n p_std = policy_action\n \n hscic_estimate = estimate_hscic(X=policy_embedding, Y=Y_std, Z=Z_std, ridge_lambda=self.ridge_lambda)\n \n policy_embedding_valid = self.policy.forward_embedding(obs_valid)\n if self.standardize:\n Y_std = (prev_expert_action_valid - self.act_mean_tt[0, :-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions_valid - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n else:\n Y_std = prev_expert_action_valid\n Z_std = actions_valid\n p_std = policy_action\n \n valid_hscic_estimate = estimate_hscic(X=policy_embedding_valid, Y=Y_std, Z=Z_std, ridge_lambda=self.ridge_lambda) \n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_expert_action_valid, Z=actions_valid, ridge_lambda=self.ridge_lambda)\n\n valid_neg_likelihood = -self.policy.log_prob(obs_valid, actions_valid).mean()\n valid_loss = valid_neg_likelihood + self.reg_coef * valid_hscic_estimate\n\n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n print(f'** iter{num+1}: train_policy_loss={train_loss.item():.2f}, val_policy_loss={valid_loss.item():.2f}, eval_ret={eval_ret_mean:.2f}+-{eval_ret_std:.2f}',)\n print(f'** HSCIC : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n \n if self.wandb:\n self.wandb.log({'train_total_loss': train_loss.item(), \n 'valid_total_loss': valid_loss.item(),\n 'train_neg_likelihood': neg_likelihood.item(),\n 'valid_neg_likelihood': valid_neg_likelihood.item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score record! 
')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep:\n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()\n \n next_obs, rew, done, _ = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "TanhGaussianPolicyWithEmbedding", "path": "core/policy.py", "snippet": "class TanhGaussianPolicyWithEmbedding(TorchStochasticPolicy):\n \"\"\"\n Reference : \n https://github.com/AlvinWen428/fighting-copycat-agents/blob/52dabfd8b1c42e50f31d84bd431915aad62e09cb/imitation_learning/models/gan_model/__init__.py#L9\n \n Usage:\n\n ```\n policy = TanhGaussianPolicy(...)\n \"\"\"\n\n def __init__(\n self,\n obs_dim,\n action_dim,\n embedding_dim,\n embedding_hidden_size,\n policy_hidden_size, \n policy_std=None,\n disc_std=None,\n init_w=1e-3,\n device='cpu',\n hidden_activation=F.leaky_relu, \n layer_norm=False,\n **kwargs\n ):\n if device =='cuda':\n ptu.set_gpu_mode(True)\n self.device = device\n \n super(TanhGaussianPolicyWithEmbedding, self).__init__()\n # hidden_sizes,\n # input_size=obs_dim,\n # output_size=action_dim,\n # init_w=init_w,\n # device=device,\n # **kwargs\n # )\n\n self.input_size = obs_dim\n self.output_size = action_dim\n self.hidden_activation = hidden_activation\n self.layer_norm = layer_norm\n\n self.embedding_params = []\n self.disc_params = []\n self.policy_params = []\n\n self.embed_fcs = []\n # self.embed_layer_norms = []\n\n self.policy_fcs = []\n # self.policy_layer_norms = []\n\n self.disc_fcs = []\n # self.disc_layer_norms = []\n \n self.device = device\n in_size = self.input_size\n\n self.embed_fcs = nn.Sequential(\n nn.Linear(self.input_size, embedding_hidden_size, bias=False, device=self.device),\n # nn.BatchNorm1d(embedding_hidden_size),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(embedding_hidden_size, embedding_dim, device=self.device), \n )\n self.embedding_params = self.embed_fcs.parameters()\n\n self.policy_fcs = nn.Sequential(\n nn.LeakyReLU(0.2, inplace=False),\n nn.Linear(embedding_dim, policy_hidden_size, device=self.device),\n nn.LeakyReLU(0.2, inplace=True),\n )\n # self.policy_params.append({'params': self.policy_fcs.parameters()})\n self.policy_mean = nn.Linear(policy_hidden_size, action_dim, 
device=self.device)\n self.policy_params.append({'params': self.policy_mean.parameters()}) \n \n # self.policy_fc1 = nn.Linear(embedding_dim, policy_hidden_size, device=self.device)\n # self.policy_fc1.weight.data.uniform_(-init_w, init_w)\n # self.policy_fc1.bias.data.fill_(0)\n # self.policy_params.append({'params': self.policy_fc1.parameters()}) \n # self.policy_fc2 = nn.Linear(policy_hidden_size, action_dim, device=self.device)\n # self.policy_fc2.weight.data.uniform_(-init_w, init_w)\n # self.policy_fc2.bias.data.fill_(0)\n # self.policy_params.append({'params': self.policy_fc2.parameters()}) \n\n self.policy_log_std = None\n self.policy_std = policy_std\n \n if policy_std is None:\n self.policy_fc_log_std = nn.Linear(policy_hidden_size, action_dim, device=self.device)\n # self.policy_fc_log_std.weight.data.uniform_(-init_w, init_w)\n # self.policy_fc_log_std.bias.data.uniform_(-init_w, init_w)\n self.policy_params.append({'params': self.policy_fc_log_std.parameters()})\n else:\n self.policy_log_std = np.log(policy_std)\n assert LOG_SIG_MIN <= self.policy_log_std <= LOG_SIG_MAX\n\n def forward(self, obs):\n # h = obs\n\n # h = self.hidden_activation(self.embed_fc1(h))\n # h = self.embed_fc2(h)\n\n # h = self.hidden_activation(self.policy_fc1(h))\n # policy_mean = self.policy_fc2(h)\n\n h = self.embed_fcs(obs)\n h = self.policy_fcs(h)\n policy_mean = self.policy_mean(h)\n\n if self.policy_std is None:\n policy_log_std = self.policy_fc_log_std(h)\n policy_log_std = torch.clamp(policy_log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n policy_std = torch.exp(policy_log_std)\n else:\n policy_std = torch.from_numpy(np.array([self.policy_std, ])).float().to(ptu.device)\n\n return TanhNormal(policy_mean, policy_std)\n\n def forward_embedding(self, obs):\n # h = obs\n \n # h = self.hidden_activation(self.embed_fc1(h))\n # h = self.embed_fc2(h)\n h = self.embed_fcs(obs)\n\n return h\n\n def forward_policy_from_embedding(self, h):\n # h = self.hidden_activation(h)\n # h = self.hidden_activation(self.policy_fc1(h))\n h = self.policy_fcs(h)\n policy_mean = self.policy_mean(h)\n\n if self.policy_std is None:\n policy_log_std = self.policy_fc_log_std(h)\n policy_log_std = torch.clamp(policy_log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n policy_std = torch.exp(policy_log_std)\n else:\n policy_std = torch.from_numpy(np.array([self.policy_std, ])).float().to(ptu.device)\n\n return TanhNormal(policy_mean, policy_std)\n\n def logprob(self, action, mean, std):\n tanh_normal = TanhNormal(mean, std)\n log_prob = tanh_normal.log_prob(\n action,\n )\n log_prob = log_prob.sum(dim=1, keepdim=True)\n return log_prob\n\n def log_prob(self, obs, action):\n tanh_normal = self.forward(obs)\n log_prob = tanh_normal.log_prob(\n action,\n )\n # log_prob = log_prob.sum(dim=1, keepdim=True)\n return log_prob\n\n def log_prob_policy_from_embedding(self, h, action):\n tanh_normal = self.forward_policy_from_embedding(h)\n log_prob = tanh_normal.log_prob(\n action,\n )\n # log_prob = log_prob.sum(dim=1, keepdim=True)\n return log_prob\n\n def predict_action_from_embedding(self, h):\n tanh_normal = self.forward_policy_from_embedding(h)\n pred_action = tanh_normal.mean \n # log_prob = log_prob.sum(dim=1, keepdim=True)\n return pred_action" }, { "identifier": "TanhGaussianRAPPolicy", "path": "core/policy.py", "snippet": "class TanhGaussianRAPPolicy(TorchStochasticPolicy):\n \"\"\"\n Reference : \n \n Usage:\n\n ```\n policy = TanhGaussianPolicy(...)\n \"\"\"\n\n def __init__(\n self,\n obs_dim,\n stack_size,\n action_dim,\n embedding_dim,\n 
embedding_hidden_size,\n policy_hidden_size,\n residual_hidden_size,\n policy_std=None,\n residual_std=0.1,\n device='cpu',\n hidden_activation=F.leaky_relu, \n layer_norm=False,\n **kwargs\n ):\n if device =='cuda':\n ptu.set_gpu_mode(True)\n self.device = device\n \n super(TanhGaussianRAPPolicy, self).__init__()\n \n self.input_size = obs_dim\n self.stack_size = stack_size\n self.output_size = action_dim\n self.hidden_activation = hidden_activation\n self.layer_norm = layer_norm\n\n self.embedding_params = []\n self.residual_params = []\n self.policy_params = []\n\n self.history_embed_fcs = []\n self.single_embed_fcs = []\n # self.embed_layer_norms = []\n\n self.policy_fcs = []\n self.residual_fcs = []\n \n self.device = device\n in_size = self.input_size\n\n self.history_embed_fcs = nn.Sequential(\n nn.Linear(self.input_size * self.stack_size, embedding_hidden_size, bias=False, device=self.device),\n # nn.BatchNorm1d(embedding_hidden_size),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(embedding_hidden_size, embedding_dim, device=self.device)\n )\n self.history_embedding_params = self.history_embed_fcs.parameters()\n \n self.single_embed_fcs = nn.Sequential(\n nn.Linear(self.input_size, embedding_hidden_size, bias=False, device=self.device),\n # nn.BatchNorm1d(embedding_hidden_size),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(embedding_hidden_size, embedding_dim, device=self.device)\n )\n self.single_embedding_params = self.single_embed_fcs.parameters()\n\n self.policy_fcs = nn.Sequential(\n nn.LeakyReLU(0.2, inplace=False),\n nn.Linear(embedding_dim*2, policy_hidden_size, device=self.device),\n nn.LeakyReLU(0.2, inplace=True),\n )\n self.policy_params.append({'params': self.policy_fcs.parameters()})\n self.policy_mean = nn.Linear(policy_hidden_size, action_dim, device=self.device)\n self.policy_params.append({'params': self.policy_mean.parameters()}) \n\n self.policy_log_std = None\n self.policy_std = policy_std\n \n if policy_std is None:\n self.policy_fc_log_std = nn.Linear(policy_hidden_size, action_dim, device=self.device)\n # self.policy_fc_log_std.weight.data.uniform_(-init_w, init_w)\n # self.policy_fc_log_std.bias.data.uniform_(-init_w, init_w)\n self.policy_params.append({'params': self.policy_fc_log_std.parameters()})\n else:\n self.policy_log_std = np.log(policy_std)\n assert LOG_SIG_MIN <= self.policy_log_std <= LOG_SIG_MAX\n\n self.residual_fcs = nn.Sequential(\n # nn.LeakyReLU(0.2, inplace=False),\n nn.Linear(embedding_dim, residual_hidden_size, device=self.device),\n nn.LeakyReLU(0.2, inplace=True),\n )\n self.residual_params.append({'params': self.residual_fcs.parameters()})\n self.residual_mean = nn.Linear(residual_hidden_size, action_dim, device=self.device) \n self.residual_params.append({'params': self.residual_mean.parameters()})\n\n def forward(self, obs):\n if len(obs.shape) < 2:\n obs = obs[None]\n \n obs_total = obs\n obs_current = obs[:, -self.input_size:]\n\n m = self.history_embed_fcs(obs_total)\n h = self.single_embed_fcs(obs_current) \n \n policy_input = torch.cat([m.detach(), h], dim=-1)\n \n policy_input = self.policy_fcs(policy_input)\n policy_mean = self.policy_mean(policy_input)\n\n if self.policy_std is None:\n policy_log_std = self.policy_fc_log_std(policy_input)\n policy_log_std = torch.clamp(policy_log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n policy_std = torch.exp(policy_log_std)\n else:\n policy_std = torch.from_numpy(np.array([self.policy_std, ])).float().to(ptu.device)\n\n policy_dist = TanhNormal(policy_mean, policy_std) \n \n return policy_dist 
#, residual_dist\n\n def forward_embedding(self, obs):\n obs_total = obs\n obs_current = obs[:, -self.input_size:]\n\n m = self.history_embed_fcs(obs_total)\n h = self.single_embed_fcs(obs_current)\n\n return m, h\n\n def forward_residual_from_m(self, m):\n residual_m = self.residual_fcs(m)\n residual_mean = self.residual_mean(residual_m) \n \n return residual_mean\n\n def forward_policy_from_embedding(self, m, h):\n policy_input = torch.cat([m.detach(), h], dim=-1)\n \n policy_input = self.policy_fcs(policy_input)\n policy_mean = self.policy_mean(policy_input)\n\n if self.policy_std is None:\n policy_log_std = self.policy_fc_log_std(policy_input)\n policy_log_std = torch.clamp(policy_log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n policy_std = torch.exp(policy_log_std)\n else:\n policy_std = torch.from_numpy(np.array([self.policy_std, ])).float().to(ptu.device)\n\n return TanhNormal(policy_mean, policy_std)\n\n def logprob(self, action, mean, std):\n tanh_normal = TanhNormal(mean, std)\n log_prob = tanh_normal.log_prob(action)\n log_prob = log_prob.sum(dim=1, keepdim=True)\n return log_prob\n \n def log_prob(self, obs, action):\n tanh_normal = self.forward(obs)\n log_prob = tanh_normal.log_prob(action) \n return log_prob\n \n def log_prob_policy_from_m_h(self, m, h, action): \n tanh_normal = self.forward_policy_from_embedding(m, h)\n log_prob = tanh_normal.log_prob(action)\n return log_prob\n\n def predict_action_from_m_h(self, m, h):\n tanh_normal = self.forward_policy_from_embedding(m, h)\n pred_action = tanh_normal.mean \n return pred_action" }, { "identifier": "EnvReplayBuffer", "path": "core/replay_buffer.py", "snippet": "class EnvReplayBuffer(SimpleReplayBuffer):\n def __init__(\n self,\n max_replay_buffer_size,\n env,\n stack_size=1,\n action_history_len=0,\n env_info_sizes=None,\n train_with_action_history=False\n ):\n \"\"\"\n :param max_replay_buffer_size:\n :param env:\n \"\"\"\n self.env = env\n self._ob_space = env.observation_space #.shape[0] * stack_size\n self._action_space = env.action_space\n\n if train_with_action_history:\n obs_dim = get_dim(self._ob_space) * stack_size + get_dim(self._action_space) * max(stack_size - 1, 1)\n else:\n obs_dim = get_dim(self._ob_space) * stack_size\n\n act_dim = get_dim(self._action_space) * (action_history_len)\n\n if env_info_sizes is None:\n if hasattr(env, 'info_sizes'):\n env_info_sizes = env.info_sizes\n else:\n env_info_sizes = dict()\n\n super().__init__(\n max_replay_buffer_size=max_replay_buffer_size,\n observation_dim=obs_dim,\n action_dim=act_dim,\n env_info_sizes=env_info_sizes\n )\n\n self.obs_mean = None\n self.obs_std = None\n\n self.act_mean = None\n self.act_std = None\n\n # def add_sample(self, observation, action, prev_action, reward, terminal,\n # next_observation, **kwargs):\n # if isinstance(self._action_space, Discrete):\n # new_action = np.zeros(self._action_dim)\n # new_action[action] = 1\n # else:\n # new_action = action\n\n # return super().add_sample(\n # observation=observation,\n # action=new_action,\n # prev_action=prev_action,\n # reward=reward,\n # next_observation=next_observation,\n # terminal=terminal,\n # # **kwargs\n # )\n\n def calculate_statistics(self):\n self.obs_mean = np.mean(self._observations[:self._top], axis=0, keepdims=True)\n self.obs_std = np.std(self._observations[:self._top], axis=0, keepdims=True)\n\n self.act_mean = np.mean(self._actions[:self._top], axis=0, keepdims=True)\n self.act_std = np.std(self._actions[:self._top], axis=0, keepdims=True)\n\n return self.obs_mean, self.obs_std, 
self.act_mean, self.act_std\n\n def set_statistics(self, obs_mean, obs_std, act_mean, act_std):\n self.obs_mean, self.obs_std, self.act_mean, self.act_std = obs_mean, obs_std, act_mean, act_std\n \n def get_statistics(self):\n return self.obs_mean, self.obs_std, self.act_mean, self.act_std\n\n def random_batch(self, batch_size, standardize=False):\n indices = np.random.choice(self._size, size=batch_size, replace=self._replace or self._size < batch_size)\n if not self._replace and self._size < batch_size:\n warnings.warn('Replace was set to false, but is temporarily set to true because batch size is larger than current size of replay.')\n\n if standardize and self.obs_mean is not None:\n obss = (self._observations[indices] - self.obs_mean) / self.obs_std\n # actions = (self._actions[indices] - self.act_mean) / self.act_std\n next_obss = (self._next_obs[indices] - self.obs_mean) / self.obs_std\n else:\n obss = self._observations[indices] \n # actions = self._actions[indices] \n next_obss = self._next_obs[indices]\n\n actions = self._actions[indices]\n \n batch = dict(\n observations=obss,\n actions=actions,\n # prev_actions=self._prev_actions[indices],\n rewards=self._rewards[indices],\n terminals=self._terminals[indices],\n next_observations=next_obss,\n )\n for key in self._env_info_keys:\n assert key not in batch.keys()\n batch[key] = self._env_infos[key][indices]\n\n return batch\n \n def get_batch(self, batch_size, standardize=False):\n datasize = min(batch_size, self._top) \n indices = np.arange(datasize)\n # if not self._replace and self._size < batch_size:\n # warnings.warn('Replace was set to false, but is temporarily set to true because batch size is larger than current size of replay.')\n\n if standardize and self.obs_mean is not None:\n obss = (self._observations[indices] - self.obs_mean) / self.obs_std\n # actions = (self._actions[indices] - self.act_mean) / self.act_std\n next_obss = (self._next_obs[indices] - self.obs_mean) / self.obs_std\n else:\n obss = self._observations[indices] \n # actions = self._actions[indices] \n next_obss = self._next_obs[indices]\n\n actions = self._actions[indices]\n \n batch = dict(\n observations=obss,\n actions=actions,\n # prev_actions=self._prev_actions[indices],\n rewards=self._rewards[indices],\n terminals=self._terminals[indices],\n next_observations=next_obss,\n )\n for key in self._env_info_keys:\n assert key not in batch.keys()\n batch[key] = self._env_infos[key][indices]\n\n return batch\n\n def add_sample(self, observation, action, reward, terminal,\n next_observation, **kwargs):\n if isinstance(self._action_space, Discrete):\n new_action = np.zeros(self._action_dim)\n new_action[action] = 1\n else:\n new_action = action\n\n return super().add_sample(\n observation=observation,\n action=new_action,\n reward=reward,\n next_observation=next_observation,\n terminal=terminal,\n # **kwargs\n )" }, { "identifier": "preprocess_dataset_with_prev_actions", "path": "core/preprocess.py", "snippet": "def preprocess_dataset_with_prev_actions(mdpfile, envtype, stacksize=1, partially_observable=False, action_history_len=2):\n \n indx = list(np.arange(20))\n # Indices of position information observations\n if partially_observable:\n envtype_to_idx = {\n 'hopper': indx[:5], \n 'ant': indx[:13], \n 'walker2d': indx[:8], \n 'halfcheetah': indx[:4] + indx[8:13]\n }\n obs_idx = envtype_to_idx[envtype]\n observations = np.array(mdpfile['observations'])[:, obs_idx]\n next_observations = np.array(mdpfile['next_observations'])[:, obs_idx]\n else:\n 
observations = np.array(mdpfile['observations'])\n next_observations = np.array(mdpfile['next_observations'])\n \n terminals = np.array(mdpfile['terminals'])\n timeouts = np.array(mdpfile['timeouts'])\n rewards = np.array(mdpfile['rewards'])\n actions = np.array(mdpfile['actions'])\n\n obs_dim = observations.shape[-1]\n action_dim = actions.shape[-1]\n\n n_data = observations.shape[0]\n new_observations_list = []\n new_next_observations_list = []\n prev_action_list = []\n action_history_list = []\n \n idx_from_initial_state = 0\n num_trajs = 0\n\n for i in range(n_data):\n if idx_from_initial_state == 0:\n prev_action = np.zeros(action_dim)\n else:\n prev_action = actions[i-1]\n prev_action_list.append(prev_action)\n\n if idx_from_initial_state < stacksize:\n if idx_from_initial_state == 0:\n initial_obs = observations[i]\n \n new_observation = np.zeros(obs_dim * stacksize)\n new_observation_ = np.concatenate(observations[i-idx_from_initial_state: i+1])\n new_observation[-(idx_from_initial_state+1) * obs_dim:] = new_observation_\n \n new_next_observation = np.zeros(obs_dim * stacksize)\n new_next_observation_ = np.concatenate(next_observations[i-idx_from_initial_state: i+1])\n new_next_observation[-(idx_from_initial_state+1) * obs_dim:] = new_next_observation_\n \n if idx_from_initial_state + 1 != stacksize:\n new_next_observation[-(idx_from_initial_state+2) * obs_dim:-(idx_from_initial_state+1) * obs_dim] \\\n = initial_obs\n \n else:\n new_observation = np.concatenate(observations[i+1-stacksize:i+1])\n new_next_observation = np.concatenate(next_observations[i+1-stacksize:i+1])\n\n if idx_from_initial_state < action_history_len:\n action_history = np.zeros(action_dim * action_history_len)\n action_history_ = np.concatenate(actions[i-idx_from_initial_state: i+1])\n action_history[-(idx_from_initial_state+1) * action_dim:] = action_history_\n \n else:\n action_history = np.concatenate(actions[i+1-action_history_len:i+1])\n\n\n new_observations_list.append(new_observation)\n new_next_observations_list.append(new_next_observation)\n action_history_list.append(action_history)\n\n idx_from_initial_state += 1\n if terminals[i] or timeouts[i]:\n idx_from_initial_state = 0\n num_trajs += 1 \n\n new_observations = np.array(new_observations_list)\n new_next_observations = np.array(new_next_observations_list)\n new_actions = np.array(action_history_list)\n\n new_paths = {\n 'observations': new_observations,\n 'next_observations': new_next_observations,\n 'rewards': rewards,\n 'actions': new_actions,\n 'terminals': terminals,\n 'timeouts': timeouts \n }\n \n return new_paths" }, { "identifier": "data_select_num_transitions", "path": "core/preprocess.py", "snippet": "def data_select_num_transitions(path, num_transitions=1000, start_idx=0, random=False):\n new_path = {}\n \n if random:\n num_full_trajs = len(path['observations'])\n choice_idx = np.random.choice(num_full_trajs, num_transitions)\n \n else:\n choice_idx = np.arange(start_idx, start_idx + num_transitions)\n \n for key in path.keys():\n new_path[key] = np.array(path[key])[choice_idx]\n \n return new_path" }, { "identifier": "NormalizedBoxEnv", "path": "rlkit/envs/wrappers.py", "snippet": "class NormalizedBoxEnv(ProxyEnv):\n \"\"\"\n Normalize action to in [-1, 1].\n\n Optionally normalize observations and scale reward.\n \"\"\"\n\n def __init__(\n self,\n env,\n reward_scale=1.,\n obs_mean=None,\n obs_std=None,\n ):\n ProxyEnv.__init__(self, env)\n self._should_normalize = not (obs_mean is None and obs_std is None)\n if 
self._should_normalize:\n if obs_mean is None:\n obs_mean = np.zeros_like(env.observation_space.low)\n else:\n obs_mean = np.array(obs_mean)\n if obs_std is None:\n obs_std = np.ones_like(env.observation_space.low)\n else:\n obs_std = np.array(obs_std)\n self._reward_scale = reward_scale\n self._obs_mean = obs_mean\n self._obs_std = obs_std\n ub = np.ones(self._wrapped_env.action_space.shape)\n self.action_space = Box(-1 * ub, ub)\n\n def estimate_obs_stats(self, obs_batch, override_values=False):\n if self._obs_mean is not None and not override_values:\n raise Exception(\"Observation mean and std already set. To \"\n \"override, set override_values to True.\")\n self._obs_mean = np.mean(obs_batch, axis=0)\n self._obs_std = np.std(obs_batch, axis=0)\n\n def _apply_normalize_obs(self, obs):\n return (obs - self._obs_mean) / (self._obs_std + 1e-8)\n\n def step(self, action):\n lb = self._wrapped_env.action_space.low\n ub = self._wrapped_env.action_space.high\n scaled_action = lb + (action + 1.) * 0.5 * (ub - lb)\n scaled_action = np.clip(scaled_action, lb, ub)\n\n wrapped_step = self._wrapped_env.step(scaled_action)\n next_obs, reward, done, info = wrapped_step\n if self._should_normalize:\n next_obs = self._apply_normalize_obs(next_obs)\n return next_obs, reward * self._reward_scale, done, info\n\n def __str__(self):\n return \"Normalized: %s\" % self._wrapped_env" } ]
import os import wandb import envs import d4rl import gym import torch from imitation.bc import BC from imitation.rap import RAP from imitation.fca import FCA from imitation.mine import MINE_BC from imitation.palr import PALR from argparse import ArgumentParser from itertools import product from core.policy import TanhGaussianPolicyWithEmbedding, TanhGaussianRAPPolicy from core.replay_buffer import EnvReplayBuffer from core.preprocess import preprocess_dataset_with_prev_actions, data_select_num_transitions from rlkit.envs.wrappers import NormalizedBoxEnv
20485
trainer.train(total_iteration=configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num']) elif 'RAP' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianRAPPolicy( obs_dim=obs_dim, stack_size=stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], residual_hidden_size=configs['additional_network_size'], device=device, ) best_policy = TanhGaussianRAPPolicy( obs_dim=obs_dim, stack_size=stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], residual_hidden_size=configs['additional_network_size'], device=device, ) trainer = RAP( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, lr = configs['lr'], save_policy_path = configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, embedding_dim = embedding_dim, stacksize = stacksize, wandb = wandb, standardize=configs['standardize'] ) trainer.train(total_iteration = configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num']) elif 'FCA' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) trainer = FCA( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, lr = configs['lr'], wandb = wandb, save_policy_path = configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, stacksize = stacksize, standardize=configs['standardize'], embedding_dim = embedding_dim, entropy_hidden_size = configs['additional_network_size'], entropy_lr = configs['inner_lr'], reg_coef = configs['reg_coef'], info_bottleneck_loss_coef = configs['info_bottleneck_loss_coef'] ) trainer.train(total_iteration = configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num'], inner_steps = configs['inner_steps'],) elif 'MINE' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, )
wandb_dir = '.' os.environ['WANDB_DIR'] = wandb_dir os.environ['D4RL_DATASET_DIR'] = './dataset/' def train(configs): env = NormalizedBoxEnv(gym.make(configs['envname'])) obs_dim = env.observation_space.low.size action_dim = env.action_space.low.size d4rl_env = gym.make(configs['d4rl_env_name']) stacksize = configs['stacksize'] if stacksize == 0: stacksize = 1 device = 'cuda' if torch.cuda.is_available() else 'cpu' envname, envtype = configs['envname'], configs['envtype'] traj_load_path = configs['traj_load_path'] print(f'-- Loading dataset from {traj_load_path}...') dataset = d4rl_env.get_dataset() print(f'-- Done!') print(f'-- Preprocessing dataset... ({envtype}, {stacksize})') path = preprocess_dataset_with_prev_actions(dataset, envtype, stacksize, configs['partially_observable'], action_history_len=2) train_data = data_select_num_transitions(path, configs['train_data_num']) valid_data = data_select_num_transitions(path, configs['valid_data_num'], start_idx=900000) replay_buffer = EnvReplayBuffer( configs['replay_buffer_size'], env, stacksize, action_history_len=2 ) replay_buffer.add_path(train_data) replay_buffer_valid = EnvReplayBuffer( configs['replay_buffer_size'], env, stacksize, action_history_len=2 ) replay_buffer_valid.add_path(valid_data) if configs['standardize']: obs_mean, obs_std, act_mean, act_std = replay_buffer.calculate_statistics() replay_buffer_valid.set_statistics(obs_mean, obs_std, act_mean, act_std) # to use wandb, initialize here, e.g. # wandb.init(project='palr', dir=wandb_dir, config=configs) wandb = None if 'BC' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device ) trainer = BC( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, envname = envname, lr = configs['lr'], save_policy_path = configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, stacksize = stacksize, wandb = wandb, standardize=configs['standardize'] ) trainer.train(total_iteration=configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num']) elif 'RAP' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianRAPPolicy( obs_dim=obs_dim, stack_size=stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], residual_hidden_size=configs['additional_network_size'], device=device, ) best_policy = TanhGaussianRAPPolicy( obs_dim=obs_dim, stack_size=stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], residual_hidden_size=configs['additional_network_size'], device=device, ) trainer = RAP( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, lr = configs['lr'], save_policy_path = 
configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, embedding_dim = embedding_dim, stacksize = stacksize, wandb = wandb, standardize=configs['standardize'] ) trainer.train(total_iteration = configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num']) elif 'FCA' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) trainer = FCA( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, lr = configs['lr'], wandb = wandb, save_policy_path = configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, stacksize = stacksize, standardize=configs['standardize'], embedding_dim = embedding_dim, entropy_hidden_size = configs['additional_network_size'], entropy_lr = configs['inner_lr'], reg_coef = configs['reg_coef'], info_bottleneck_loss_coef = configs['info_bottleneck_loss_coef'] ) trainer.train(total_iteration = configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num'], inner_steps = configs['inner_steps'],) elif 'MINE' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, )
trainer = MINE_BC(
3
2023-11-06 08:35:34+00:00
24k
tylerlight071/Project-Cipher
main.py
[ { "identifier": "clear_terminal", "path": "components/common_functions.py", "snippet": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')" }, { "identifier": "print_slow", "path": "components/common_functions.py", "snippet": "def print_slow(text, delay=0.00): # change to 0.01\n for char in text:\n print(char, end='', flush=True)\n time.sleep(delay)\n print()" }, { "identifier": "shop_help", "path": "components/common_functions.py", "snippet": "def shop_help():\n print_slow(Fore.YELLOW + \"Shop Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[buy] - Use the 'buy [upgrade]' command to purchase the upgrade in the shop. \")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the main terminal.\")\n print_slow(\"\")" }, { "identifier": "help_user", "path": "components/common_functions.py", "snippet": "def help_user():\n print_slow(Fore.MAGENTA + \"Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[connect] - Use the 'connect' command to hack into Enigma Corps network.\")\n print_slow(\"\")\n print_slow(\"[mail] - Use the 'mail' command to view and respond to emails from your client and other characters.\")\n print_slow(\"\")\n print_slow(\"[balance] - Use the 'balance' command to view your current earnings which you can spend on upgrades. \")\n print_slow(\"\")\n print_slow(\"[shop] - Use the 'shop' command to view upgrades available in the shop. \")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[help] - Use the 'help' command if you need assistance at any time.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the Main Menu.\")\n print_slow(\"\")" }, { "identifier": "connect_help", "path": "components/common_functions.py", "snippet": "def connect_help():\n print_slow(Fore.MAGENTA + \"Connect Help:\" + Style.RESET_ALL)\n print_slow(\n \"[scan] - Use the 'scan' command to scan the network and search for available systems and vulnerabilities.\")\n print_slow(\"\")\n print_slow(\"[hack] - Use the 'hack [system/vulnerability]' to hack into different systems.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[disconnect] - Use the 'disconnect' command to disconnect from the current system or vulnerability.\")\n print_slow(\"\")" }, { "identifier": "mail_help", "path": "components/common_functions.py", "snippet": "def mail_help():\n print_slow(Fore.LIGHTBLUE_EX + \"Mail Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[l] - Use the 'l' command to list all emails.\")\n print_slow(\"\")\n print_slow(\"[r] - Use the 'r [subject]' command to read an email with the specified subject.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the main terminal.\")\n print_slow(\"\")" }, { "identifier": "system_help", "path": "components/common_functions.py", "snippet": "def system_help():\n print_slow(\"\")\n print_slow(\"[mail] - Use the 'mail' command to log into the users emails.\")\n print_slow(\"\")\n print_slow(\"[l] - Use the 'l' command to list files in a users system.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[r] - Use the 'r 
[file]' command to read files in a users system\")\n print_slow(\"\")" }, { "identifier": "intro_call", "path": "conversations/calls.py", "snippet": "def intro_call():\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Welcome, Cipher. Operation Enigma is our covert mission against Enigma Corp, a powerful and secretive entity.\")\n print_slow(\n \"Your skills and secrecy have brought you to our attention. Your mission is to dig through their systems and servers looking for valuable data.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"Got it, Anonymous. Exposing secrets and bringing justice. I'm in.\")\n print_slow(\"What's my first move? Talk to me about this 'EnigmaLink'.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Excellent, Cipher. EnigmaLink is a specialized tool available on the Hacker's Market. It contains a hidden backdoor, allowing access to Enigma Corps servers.\")\n print_slow(\n \"Your task is to acquire EnigmaLink and initiate your infiltration. Use the 'connect' command to navigate the network and gather crucial intelligence.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"EnigmaLink, got it. I'll secure it and initiate the infiltration. What about this employee, Amy?\")\n print_slow(\"You mentioned her password is 'sexinthecity.' What's my objective with her?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Good question, Cipher. Amy is a key target. Use her password to access her computer and gather any pertinent information.\")\n print_slow(\n \"This data is vital to our cause. Be thorough and meticulous in your investigation. The success of our operation depends on it.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"Understood, Anonymous. I'll focus on Amy, gather intel, and proceed with precision.\")\n print_slow(\"Consider it done. Anything else I should know before I dive in?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"One last thing, Cipher. All collected data is highly confidential. This contract is binding, and your success is paramount.\")\n print_slow(\"Execute with diligence, and may the odds be in your favor. 
Good luck, Cipher.\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "first_call", "path": "conversations/calls.py", "snippet": "def first_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"That's a good start, but we already have that information.\")\n print_slow(\"Regardless, I've transferred £20 into the account for your troubles.\")\n print_slow(\"Keep digging Cipher!\" + Style.RESET_ALL)\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "second_call", "path": "conversations/calls.py", "snippet": "def second_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Hey Cipher, you nailed it! 'Billy' just spilled the beans about wanting to climb the corporate ladder into management.\")\n print_slow(\n \"This is gold for us. We can guide 'Billy' toward training and workshops that align with our interests, nudging things in our favor.\")\n print_slow(\n \"Picture it – we're pulling the strings, helping 'Billy' grow, and steering the ship where we want it to go.\")\n print_slow(\"Keep the ball rolling, Cipher!\" + Style.RESET_ALL)\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "third_call", "path": "conversations/calls.py", "snippet": "def third_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\"\n \"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've stumbled upon a perplexing development regarding Enigma's interest in a mysterious 'compound.'\")\n print_slow(\n \"I'm cross-referencing our existing intel to unveil more details. Stay vigilant and be prepared for the unknown.\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"A compound, huh? Any hints on whether we're talking metal, chemicals, or something else entirely?\")\n print_slow(\"This feels like navigating in the dark. What exactly am I dealing with?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Response\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW +\n \"Cipher, we're in the dark too. Initial reports are unclear—could be metal, chemical, or something beyond our comprehension.\")\n print_slow(\n \"Your mission is to identify the nature of this compound. Exercise extreme caution; this goes deeper than we anticipated.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Inquiry\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"So, we're playing 'guess the compound.' 
Any leads, any connections I should explore?\")\n print_slow(\"This is starting to sound like one of those high-stakes puzzles.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Clarification\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW +\n \"I wish I had more details, Cipher. This is uncharted territory for us. Investigate discreetly, and trust no one.\")\n print_slow(\n \"I'll attempt to gather more intel. Stay on the line, and keep me updated on any findings.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "fourth_call", "path": "conversations/calls.py", "snippet": "def fourth_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've got our hands on an intriguing document – an Employee Performance Review for 'Billy Constantine'.\")\n print_slow(\n \"This could be a goldmine of information. Let's dig in and see if there's anything we can leverage to our advantage.\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"An Employee Performance Review? Interesting choice. What's the scoop on 'Billy Constantine'?\")\n print_slow(\"Give me the details, and we'll figure out our next move.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Briefing\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Cipher, 'Billy Constantine' is making waves. The review highlights exceptional performance as a sales representative.\")\n print_slow(\n \"He's exceeding sales targets, mentoring new team members, and earning a solid 4.5/5 rating. A rising star, it seems.\")\n print_slow(\"We might use this to our advantage. Let's explore how we can align his ambitions with our agenda.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Strategy\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"A high-performing sales rep, huh? We could steer 'Billy' towards projects that align with our goals.\")\n print_slow(\"Let's use this performance review to our advantage. Maybe mentorship programs, leadership initiatives?\")\n print_slow(\"I'm ready to play this card strategically. What's the next move?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Next Steps\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Great thinking, Cipher. Let's work on a plan to subtly guide 'Billy' toward initiatives that benefit us.\")\n print_slow(\"We'll need to dig deeper into 'Billy's' aspirations and weave our influence seamlessly.\")\n print_slow(\"Stay vigilant, Cipher. 
This could be a game-changer.\")\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "fifth_call", "path": "conversations/calls.py", "snippet": "def fifth_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've intercepted some Meeting Minutes dated 24/06/2025. It's related to 'Project X' and involves key players.\")\n print_slow(\n \"This could be our chance to uncover more about Enigma's activities. Let's dive into the details and see what we can extract.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"Meeting Minutes, huh? 'Project X' sounds intriguing. Who were the players involved, and what's the agenda?\")\n print_slow(\"I'm ready to dissect this information and uncover any hidden gems.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Briefing\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Cipher, the meeting involved key personnel—Amy, Billy, Kyle, and others. 'Project X' is on the agenda, and there's mention of sensitive materials.\")\n print_slow(\n \"This could be a crucial insight into Enigma's plans. Let's analyze the action items and plan our next move.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Analysis\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"'Project X,' sensitive materials, and action items. This is a goldmine of information.\")\n print_slow(\n \"Let's focus on dissecting the action items and see if we can connect the dots. What's our strategy, Anonymous?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Next Steps\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Agreed, Cipher. Let's delve into the action items, especially the data compilation and safety protocol training.\")\n print_slow(\"We might uncover more about 'Project X' and gain insights into Enigma's plans.\")\n print_slow(\"Stay sharp, Cipher. 
This could be a pivotal moment in our mission.\")\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "sixth_call", "path": "conversations/calls.py", "snippet": "def sixth_call():\n print_slow(\"ADD CALL STUFF HERE\")" }, { "identifier": "markus_seen_call", "path": "conversations/calls.py", "snippet": "def markus_seen_call():\n print_slow(\"Something goes here\")" }, { "identifier": "code_shatter_call", "path": "conversations/minigame_calls.py", "snippet": "def code_shatter_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"I see you have bought CodeShatter!\")\n print_slow(\"This item is a one time use upgrade so once you get the password, it is gone so use wisely!\")\n print_slow(\"But don't threat, if you fail, you get a chance to retry. The item is only used when you get the password, so be sure to write it down!\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "code_shatter_minigame", "path": "minigames/code_shatter_minigame.py", "snippet": "def code_shatter_minigame():\n # Generate a random 5-digit number\n target = [str(random.randint(1, 9)) for _ in range(5)]\n\n print_slow(\"Welcome to CodeShatter!\")\n print_slow(\"\")\n print_slow(\"Guess the 5-digit number.\")\n print_slow(\"\")\n print_slow(\"The sequence can contain multiple same numbers\")\n print_slow(\"\")\n print_slow(Fore.GREEN + \"Green: Correct digit in correct position.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"Orange: Correct digit in incorrect position.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.RED + \"Red: Incorrect digit.\" + Style.RESET_ALL)\n print_slow(\"\")\n\n attempts = 0\n while attempts < 7:\n # Get the user's guess\n guess = input(\"Enter your guess: \")\n\n if len(guess) != 5 or not guess.isdigit():\n print_slow(\"Invalid input. Please enter a 5-digit number.\")\n continue\n\n attempts += 1\n\n # Check the guess against the target\n feedback = []\n for i in range(5):\n if guess[i] == target[i]:\n feedback.append(Fore.GREEN + guess[i] + Style.RESET_ALL)\n elif guess[i] in target:\n feedback.append(Fore.YELLOW + guess[i] + Style.RESET_ALL)\n else:\n feedback.append(Fore.RED + guess[i] + Style.RESET_ALL)\n\n print_slow(\"Feedback: \" + \" \".join(feedback))\n\n # Check if the guess is correct\n if guess == \"\".join(target):\n print_slow(Fore.GREEN + \"Access granted.\" + Style.RESET_ALL)\n break\n else:\n print_slow(Fore.RED + \"Access denied. 
Too many attempts.\" + Style.RESET_ALL)\n time.sleep(1)\n print_slow(\"\")\n print_slow(Fore.RED + \"Rebooting CodeShatter with new proxy...\" + Style.RESET_ALL)\n time.sleep(1)\n clear_terminal()\n code_shatter_minigame()" }, { "identifier": "port_scanning", "path": "minigames/eye_spy_minigame.py", "snippet": "def port_scanning():\n num_ports = 10\n open_ports, closed_ports = generate_ports(num_ports)\n attempts = 5\n correct_guesses = 0\n scan_attempts = 2\n\n print_slow(\"Welcome to the Port Scanning minigame!\")\n print_slow(\"\")\n print_slow(f\"Find the open ports in the range 1-{num_ports}.\")\n print_slow(\"\")\n print_slow(f\"You have {attempts} attempts.\")\n print_slow(\"\")\n\n while scan_attempts > 0:\n print_slow(\"\")\n print_slow(f\"\\nYou have {scan_attempts} scan attempts left.\")\n print_slow(\"\")\n start = int(input(\"Enter the start of the range to scan: \"))\n print_slow(\"\")\n end = int(input(\"Enter the end of the range to scan: \"))\n print_slow(\"\")\n\n num_open_ports_in_range = len(open_ports.intersection(range(start, end + 1)))\n print_slow(\"\")\n print_slow(f\"There are {num_open_ports_in_range} open ports in the range {start}-{end}.\")\n\n scan_attempts -= 1\n\n while attempts > 0 and len(open_ports) > 0:\n port = int(input(\"\\nEnter a port number to guess: \"))\n\n if port in open_ports:\n print_slow(Fore.GREEN + \"Port is open!\" + Style.RESET_ALL)\n open_ports.remove(port)\n correct_guesses += 1\n elif port in closed_ports:\n print_slow(Fore.RED + \"Port is closed.\" + Style.RESET_ALL)\n closed_ports.remove(port)\n else:\n print_slow(\"Invalid port number. Please enter a number between 1 and\", num_ports)\n\n attempts -= 1\n\n if len(open_ports) == 0:\n print_slow(\n Fore.GREEN + \"\\nCongratulations! You have successfully found all the open ports and gained access to the camera.\" + Style.RESET_ALL)\n time.sleep(2)\n clear_terminal()\n else:\n print_slow(\n Fore.RED + f\"\\nHack Failed! You found {correct_guesses} out of {len(open_ports) + correct_guesses} open ports.\" + Style.RESET_ALL)\n time.sleep(1)\n clear_terminal()\n port_scanning()" }, { "identifier": "AmySystem", "path": "systems/level_1/amy/amy_system.py", "snippet": "class AmySystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"return_to_work_form.txt\",\n \"content\": (\n \"Employee Name: _______________\\n\"\n \"Employee ID: ____________\\n\"\n \"Department: _______________\\n\"\n \"Date of Return: ______\\n\\n\"\n \"I, [Employee Name], certify that I have followed the company's \"\n \"guidelines for returning to work after an absence. \"\n \"I understand that it is my responsibility to adhere to all safety \"\n \"protocols and procedures to ensure the health and well-being of my \"\n \"colleagues and myself.\\n\\n\"\n \"I acknowledge that I have completed any necessary training and have \"\n \"been briefed on any updates to the company's policies and procedures. \"\n \"I am aware that I must report any symptoms or exposure to COVID-19 to \"\n \"my supervisor immediately.\\n\\n\"\n \"I am committed to doing my part to maintain a safe and healthy work \"\n \"environment for everyone. I will continue to follow all guidelines \"\n \"and protocols and will cooperate with any additional measures that \"\n \"may be implemented in the future.\\n\\n\"\n \"Signature: [Employee Signature]\\n\"\n \"Date: [Date]\"\n )\n },\n {\n \"name\": \"employee_handbook.txt\",\n \"content\": (\n \"Welcome to Enigma Corps We are thrilled to have you as part of our \"\n \"team. 
This employee handbook has been designed to help you understand \"\n \"our company's policies, procedures, and expectations.\\n\\n\"\n \"Our company is committed to fostering a positive and inclusive work \"\n \"environment where all employees feel valued and supported. We believe \"\n \"in treating everyone with respect and dignity and expect all employees \"\n \"to do the same.\\n\\n\"\n \"In this handbook, you will find information on topics such as:\\n\\n\"\n \"- Code of Conduct\\n\"\n \"- Dress Code\\n\"\n \"- Attendance and Punctuality\\n\"\n \"- Time Off and Leave Policies\\n\"\n \"- Performance Evaluations\\n\"\n \"- Health and Safety\\n\"\n \"- Equal Employment Opportunity\\n\"\n \"- Harassment and Discrimination\\n\\n\"\n \"Please take the time to read through this handbook carefully and \"\n \"familiarize yourself with our policies and procedures. If you have any \"\n \"questions or concerns, do not hesitate to reach out to your supervisor \"\n \"or the HR department.\\n\\n\"\n \"We look forward to working with you and hope you have a long and \"\n \"successful career with Enigma Corps!\"\n )\n },\n {\n \"name\": \"benefits_summary.txt\",\n \"content\": (\n \"At Enigma Corps, we believe in taking care of our employees and \"\n \"offer a comprehensive benefits package to support your health, well-being, \"\n \"and financial security. Below is a summary of the benefits available to \"\n \"you as an employee of Enigma Corps.\\n\\n\"\n \"Health Insurance: We offer a choice of medical, dental, and vision \"\n \"plans to meet your needs. Our plans provide coverage for preventive care, \"\n \"hospitalization, prescription drugs, and more.\\n\\n\"\n \"Retirement Savings: We offer a 401(k) plan with a generous company \"\n \"match to help you save for your future. You can choose from a variety of \"\n \"investment options to suit your needs.\\n\\n\"\n \"Paid Time Off: We provide a generous amount of paid time off, \"\n \"including vacation, sick leave, and holiday pay. We also offer paid \"\n \"parental leave for new parents.\\n\\n\"\n \"Flexible Work Arrangements: We understand the importance of work-life \"\n \"balance and offer flexible work arrangements, such as remote work and \"\n \"flexible schedules, where possible.\\n\\n\"\n \"Wellness Programs: We offer a variety of wellness programs and \"\n \"resources to support your physical and mental health, including fitness \"\n \"classes, stress management programs, and counseling services.\\n\\n\"\n \"Professional Development: We are committed to supporting your growth \"\n \"and development and offer a variety of training and development \"\n \"opportunities, including tuition reimbursement, workshops, and seminars.\"\n \"\\n\\n\"\n \"We encourage you to review this summary carefully and take advantage of \"\n \"the benefits available to you. If you have any questions or need further \"\n \"information, please contact the HR department.\"\n )\n },\n ]\n self.emails = [\n {\n \"sender\": \"Amy\",\n \"subject\": \"Can't Stop Thinking About You\",\n \"body\": (\n \"Hey Billy,\\n\\n\"\n \"I hope this message finds you in good spirits. I've been meaning to write to you for a while now, but I couldn't find the right words to express what I've been feeling.\\n\\n\"\n \"Ever since that night we spent together, I can't seem to get you out of my mind. There's something about the way you make me feel that I've never experienced before. 
\"\n \"\\nIt's exhilarating, yet terrifying all at the same time.\\n\\n\"\n \"I know we both have a lot on our plates right now, and I don't want to add any more stress to your life. But I can't help but wonder what could happen if we gave this a real shot. \"\n \"I know it's complicated, and there are a lot of factors to consider, but I think we owe it to ourselves to explore this connection we have.\\n\\n\"\n \"I understand if you're not ready to take that step, and I don't want to pressure you into anything you're not comfortable with. \"\n \"\\nBut I can't shake the feeling that we could have something truly special together.\\n\\n\"\n \"I'd love to hear your thoughts on this, and I'm more than willing to take things slow if that's what you need. Maybe we could meet up for dinner and talk about it in person?\"\n \" I think it would be easier to have this conversation face-to-face.\\n\\n\"\n \"I hope you're doing well, and I look forward to hearing from you soon.\\n\\n\"\n \"Take care,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Need Your Help on the Smith Project\",\n \"body\": (\n \"Hi Billy,\\n\\n\"\n \"I hope this email finds you well. I wanted to reach out and ask for your help on the Smith project. I've been having some trouble with the data analysis portion,\"\n \"\\nand I know you have a lot of experience in that area.\\n\\n\"\n \"The project involves analyzing customer feedback data to identify trends and areas for improvement. I've been working on it for a few weeks now, but I'm finding it challenging to make sense of the data and\"\n \"\\ndraw meaningful conclusions.\\n\\n\"\n \"Would you be available for a quick meeting later this week to go over some of the data with me? I would really appreciate your input and guidance on this. \"\n \"\\nI think your expertise could really help me make progress and ensure the success of the project.\\n\\n\"\n \"If you're available, please let me know your preferred date and time, and I'll send out a calendar invite. I'm flexible and can work around your schedule.\\n\\n\"\n \"Thank you in advance for your help, and I look forward to hearing from you soon.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Request for Time Off\",\n \"body\": (\n \"Good Afternoon Katie,\\n\\n\"\n \"I hope this email finds you well. I wanted to request some time off next month for a family vacation. I am planning to be out of the office from 10/09/2024 to 18/09/2024\\n\\n\"\n \"I have been working hard on the Johnson project and have made significant progress. I will make sure to finish up any outstanding work and hand off any ongoing projects to my colleagues before I leave. I will also be available by email in case of any urgent matters.\\n\\n\"\n \"I understand that this is a busy time for the team, and I want to ensure that my absence doesn't cause any disruptions. I have already spoken to Markus and he has kindly agreed to cover for me while I'm away.\\n\\n\"\n \"Thank you for considering my request. I look forward to spending some quality time with my family and coming back to work refreshed and recharged.\"\n \"\\nI am confident that the time off will help me come back with renewed energy and focus.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Apology for the Mistake\",\n \"body\": (\n \"Good Morning Kyle,\\n\\n\"\n \"I hope this email finds you well. I wanted to reach out and apologize for the mistake I made on the Johnson report. 
I realize now that I overlooked some important data, and I take full responsibility for it.\\n\\n\"\n \"I have gone back and corrected the report, and I will make sure to double-check my work in the future to avoid any similar mistakes. I have also attached the updated report for your reference.\\n\\n\"\n \"I understand if you are disappointed or frustrated, and I am more than willing to do whatever it takes to make it right. Please let me know if there's anything else I can do to fix this,\"\n \"\\nor if you would like to discuss this further.\\n\\n\"\n \"Once again, I am truly sorry for the mistake, and I appreciate your understanding. I value our working relationship and hope that this incident doesn't tarnish it. I am committed to making amends and ensuring that this doesn't happen again in the future.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Thank You for Letting Me Use Your Computer\",\n \"body\": (\n \"Hey Billy,\\n\\n\"\n \"I wanted to take a moment to express my gratitude for allowing me to use your computer while mine was being serviced by IT. \"\n \"It was a huge help and allowed me to stay productive during that time.\\n\\n\"\n \"I also noticed that your password is 'football'. While I understand it's easy to remember, it's important to choose a more secure password to protect your accounts.\"\n \"\\nI would recommend changing it to something more complex and unique. You never know who's watching after all.\\n\\n\"\n \"Thanks again for your generosity and understanding.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n }\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" }, { "identifier": "BillySystem", "path": "systems/level_1/billy/billy_system.py", "snippet": "class BillySystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"cover_letter.txt\",\n \"content\": (\n \"Dear Hiring Manager,\\n\\n\"\n \"I am writing to express my interest in the management position at Enigma Corps. \"\n \"I have been with the company for over 7 years and have consistently demonstrated my commitment to driving excellence and fostering collaboration within the team.\\n\\n\"\n \"During my tenure at Enigma Corps, I have been involved in various projects, including the successful completion of the Q3 deliverables project, where I played a key role in the planning and execution stages. \"\n \"My dedication to achieving project milestones and my ability to work under pressure make me a strong candidate for a management role.\\n\\n\"\n \"I possess strong leadership skills, which I have honed through my experiences in leading teams and coordinating cross-functional efforts. 
\"\n \"My ability to communicate effectively and build positive relationships with team members and stakeholders has resulted in successful project outcomes and increased productivity.\\n\\n\"\n \"In addition to my technical and leadership skills, I am also committed to continuous learning and professional development. \"\n \"I have participated in various training programs and workshops to enhance my management skills and stay up-to-date with industry trends and best practices.\\n\\n\"\n \"I am excited about the opportunity to contribute to the growth and success of Enigma Corps as a member of the management team. \"\n \"I am confident that my skills and experience will be valuable assets to the company, and I look forward to the opportunity to work closely with the team to drive innovation and excellence.\\n\\n\"\n \"Thank you for considering my application. I am looking forward to the opportunity to discuss my qualifications further and explore how I can contribute to the success of Enigma Corps.\\n\\n\"\n \"Sincerely,\\n\"\n \"Billy Constantine\\n\"\n )\n },\n {\n \"name\": \"employee_handbook.txt\",\n \"content\": (\n \"Welcome to Enigma Corps We are thrilled to have you as part of our \"\n \"team. This employee handbook has been designed to help you understand \"\n \"our company's policies, procedures, and expectations.\\n\\n\"\n \"Our company is committed to fostering a positive and inclusive work \"\n \"environment where all employees feel valued and supported. We believe \"\n \"in treating everyone with respect and dignity and expect all employees \"\n \"to do the same.\\n\\n\"\n \"In this handbook, you will find information on topics such as:\\n\\n\"\n \"- Code of Conduct\\n\"\n \"- Dress Code\\n\"\n \"- Attendance and Punctuality\\n\"\n \"- Time Off and Leave Policies\\n\"\n \"- Performance Evaluations\\n\"\n \"- Health and Safety\\n\"\n \"- Equal Employment Opportunity\\n\"\n \"- Harassment and Discrimination\\n\\n\"\n \"Please take the time to read through this handbook carefully and \"\n \"familiarize yourself with our policies and procedures. If you have any \"\n \"questions or concerns, do not hesitate to reach out to your supervisor \"\n \"or the HR department.\\n\\n\"\n \"We look forward to working with you and hope you have a long and \"\n \"successful career with Enigma Corps!\"\n )\n },\n {\n \"name\": \"meeting_minutes.txt\",\n \"content\": (\n \"Meeting Minutes\\n\\n\"\n \"Date: 24/06/2025\\n\"\n \"Location: REDACTED\\n\"\n \"Attendees: Amy, REDACTED, Billy, Kyle, REDACTED, REDACTED, REDACTED\\n\\n\"\n \"Agenda:\\n\"\n \"- Discuss progress on Project REDACTED\\n\"\n \"- Review safety protocols for handling sensitive materials\\n\"\n \"- Plan next steps for research and development\\n\\n\"\n \"Action Items:\\n\"\n \"- Compile data from recent experiments and share with team\\n\"\n \"- Schedule training session on updated safety protocols\\n\"\n \"- Develop timeline for next phase of Project X\\n\\n\"\n \"Next Meeting: 05/08/24, 12:00pm\\n\"\n )\n },\n {\n \"name\": \"employee_performance_review.txt\",\n \"content\": (\n \"Employee Performance Review\\n\\n\"\n \"Employee Name: Billy Constantine\\n\"\n \"Employee ID: 035854\\n\"\n \"Review Date: 28/06/2024\\n\\n\"\n \"Performance Summary:\\n\"\n \"Billy has demonstrated exceptional performance in his role as a sales representative. 
He has consistently exceeded sales targets, built strong relationships with clients, and demonstrated leadership qualities in team meetings and projects.\\n\\n\"\n \"Strengths:\\n\"\n \"- Exceeded quarterly sales targets by 15%.\\n\"\n \"- Successfully onboarded and mentored two new team members.\\n\"\n \"- Demonstrated excellent communication and negotiation skills.\\n\\n\"\n \"Areas for Improvement:\\n\"\n \"- Time management skills can be further developed to ensure all tasks are completed in a timely manner.\\n\"\n \"- Continued development of technical knowledge to stay up-to-date with industry trends.\\n\"\n \"- Strengthen collaboration with cross-functional teams to drive more integrated solutions.\\n\\n\"\n \"Goals for Next Review Period:\\n\"\n \"- Increase sales targets by 20%.\\n\"\n \"- Complete a management training program.\\n\"\n \"- Improve time management skills through prioritization and delegation.\\n\\n\"\n \"Overall Rating: 4.5/5\\n\"\n \"Reviewer Name: Katie Thompson\\n\"\n \"Reviewer Signature: Katie Thompson\\n\"\n \"Date: 28/06/2024\\n\"\n )\n }\n ]\n self.emails = [\n\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Need Your Help on the Smith Project\",\n \"body\": (\n \"Hi Amy,\\n\\n\"\n \"I hope this message finds you in great spirits! I'm more than happy to lend a helping hand with the Smith project. After all, two heads are better than one, especially when it comes to data analysis, right?\\n\\n\"\n \"How about we grab a coffee and chat about the project in person? I think it would be nice to catch up and discuss the data over a cup of joe. I'm sure we can brainstorm some ideas and come up with a game plan together.\\n\\n\"\n \"I'm free [date] at [time], does that work for you? If not, just let me know your availability, and we can find a time that suits us both. I'm really looking forward to our coffee date and tackling the project together.\\n\\n\"\n \"Can't wait to see you and dive into the data!\\n\\n\"\n \"Best,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Project Update\",\n \"body\": (\n \"Hello Team,\\n\\n\"\n \"I wanted to provide everyone with a quick update on our progress with the Q3 deliverables project. We've successfully completed the initial research phase and are now moving into the planning stage.\\n\\n\"\n \"In our last meeting, we discussed the following key points:\\n\"\n \"- Compound Analysis: We've identified a unique compound with potential applications in various industries. Further testing and analysis are required to unlock its full potential.\\n\"\n \"- Resource Management: We've allocated a special team and dedicated resources to handle the delicate nature of this project, ensuring utmost confidentiality and security.\\n\"\n \"- Safety Protocols: We've developed strict safety protocols to handle the compound, and we're conducting regular training sessions to ensure compliance.\\n\\n\"\n \"Our next steps include finalizing the project plan, assigning tasks to team members, and setting deadlines. I would appreciate input and feedback from all team members to ensure we're on the right track. Please review the attached project plan document for more details.\\n\\n\"\n \"Additionally, I want to remind everyone of the confidential nature of this project. It's imperative that we maintain discretion and follow all security protocols to safeguard our work. 
Let's work together to make this project a success and uphold the company's reputation for innovation and excellence.\\n\\n\"\n \"If you have any questions or concerns, please don't hesitate to reach out. Your cooperation and commitment to this project are greatly appreciated.\\n\\n\"\n \"Best regards,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Can't Stop Thinking About You\",\n \"body\": (\n \"Hey there, Amy,\\n\\n\"\n \"Wow, your message really caught me by surprise! But in the best way possible, of course. I've been trying to play it cool, but I have to admit, I've been thinking about that night a lot too. There was just something electric in the air, wasn't there?\\n\\n\"\n \"I've been tossing and turning, wondering if I should reach out to you or if I should wait for you to make the first move. I guess you beat me to it, and I'm glad you did. It's like you read my mind.\\n\\n\"\n \"I can't deny that there's a certain chemistry between us, and I'm intrigued to see where it could lead. I agree that our lives are complicated, and we don't want to add more stress to each other's plates. But sometimes, taking a risk is what makes life exciting, don't you think?\\n\\n\"\n \"I don't want to rush things or make you feel pressured in any way. I'm more than happy to take things slow and let them unfold naturally. But I can't help but imagine the possibilities if we give this a real shot. We could have something truly special, and I don't want to let that pass us by.\\n\\n\"\n \"How about we meet up for dinner and drinks next week? We can talk about it more and see where the night takes us. I think it would be a fun and relaxed way to get to know each other better and explore this connection we have. What do you say?\\n\\n\"\n \"I hope you're doing well, and I'm eagerly awaiting your reply. Until then, I'll be daydreaming about our next encounter.\\n\\n\"\n \"Take care, and talk to you soon.\\n\\n\"\n \"Yours truly,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Thank You for Letting Me Use Your Computer\",\n \"body\": (\n \"Hey Amy,\\n\\n\"\n \"No problem at all! I'm always here to help out when I can. It's what teammates do, right?\\n\\n\"\n \"Oh, and about the password thing – haha, I know it's not the most secure choice. I've been meaning to change it, but I guess old habits die hard, right? \"\n \"Thanks for looking out for me though! I'll try to come up with something a bit more creative next time.\\n\\n\"\n \"If you ever need anything else, just give me a shout. Happy to help!\\n\\n\"\n \"Take care,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Professional Development\",\n \"body\": (\n \"Good Evening Katie,\\n\\n\"\n \"I hope this email finds you well. I'm reaching out to express my interest in professional development opportunities within the company, particularly in the area of management and leadership.\\n\\n\"\n \"I've been with the company for several years now, and I've had the chance to work on various projects and collaborate with different teams. I'm keen to build on this experience and take on more responsibility, and I believe that acquiring the necessary skills for a management role would be a great next step in my career.\\n\\n\"\n \"Could you please provide information on available training programs, workshops, or seminars that focus on leadership development and management skills? 
I'm particularly interested in areas such as team leadership, strategic planning, conflict resolution, and decision-making.\\n\\n\"\n \"Additionally, if there are any tuition reimbursement programs or resources for management training and certification, I'd like to learn more about them. I'm committed to investing time and effort in my professional growth and believe that these opportunities would greatly benefit both myself and the company.\\n\\n\"\n \"Your guidance and assistance in exploring these options would be greatly appreciated. I look forward to your response and any recommendations you may have.\\n\\n\"\n \"Thank you for your support, and I'm excited about the prospect of contributing to the company's success in a management role.\\n\\n\"\n \"Best regards,\\n\"\n \"Billy\"\n )\n }\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" }, { "identifier": "camera_first", "path": "systems/level_1/cameras/camera_1.py", "snippet": "def camera_first():\n print(camera_1)\n print()\n print()\n move = input(Fore.GREEN + \"> \" + Style.RESET_ALL)\n\n if move.lower() == \"forward\":\n clear_terminal()\n camera_second()\n elif move.lower() == \"back\":\n print(Fore.RED + \"There is nothing to go back to...\" + Style.RESET_ALL)\n time.sleep(2)\n clear_terminal()\n camera_first()" }, { "identifier": "MarkusSystem", "path": "systems/level_1/markus/markus_system.py", "snippet": "class MarkusSystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"system_log.txt\",\n \"content\": (\n \"Enigma Corps System Log\\n\\n\"\n \"Date: 2023-11-16 08:00 AM\\n\"\n \"Event Type: System Startup\\n\"\n \"Description: The Enigma Corps systems smoothly initiated startup procedures, ensuring a seamless beginning to the workday.\\n\\n\"\n \"Date: 2023-11-16 10:30 AM\\n\"\n \"Event Type: Network Upgrade\\n\"\n \"Description: Implemented a network upgrade to enhance data transfer speeds, providing improved efficiency across departments.\\n\\n\"\n \"Date: 2023-11-16 01:45 PM\\n\"\n \"Event Type: Security Patch Applied\\n\"\n \"Description: Critical security patch successfully applied to safeguard against potential vulnerabilities, ensuring system integrity.\\n\\n\"\n \"Date: 2023-11-16 04:20 PM\\n\"\n \"Event Type: Server Maintenance\\n\"\n \"Description: Conducted routine maintenance on Enigma Corps servers, optimizing performance and minimizing downtime.\\n\\n\"\n \"This dynamic system log captures key events, from the smooth startup of the day to network upgrades, security enhancements, and routine maintenance. 
It serves as a valuable record for troubleshooting and analysis, ensuring the optimal functionality of Enigma Corps systems.\"\n )\n },\n {\n \"name\": \"technical_documentation.docx\",\n \"content\": (\n \"Enigma Corps System Technical Documentation\\n\\n\"\n \"1. System Architecture:\\n\"\n \" - Overview of the system's structural design and components.\\n\\n\"\n \"2. Network Configuration:\\n\"\n \" - Details on the configuration of Enigma Corps' network setup for efficient communication.\\n\\n\"\n \"3. Security Protocols:\\n\"\n \" - Comprehensive overview of security measures and protocols implemented to safeguard sensitive data.\\n\\n\"\n \"4. Troubleshooting Guide:\\n\"\n \" - Step-by-step guide for identifying and resolving common issues to ensure seamless system functionality.\\n\\n\"\n \"5. Software Installation Procedures:\\n\"\n \" - Instructions for installing and updating software components within the Enigma Corps system.\\n\\n\"\n \"6. Hardware Specifications:\\n\"\n \" - Detailed specifications of the hardware components utilized in the Enigma Corps infrastructure.\\n\\n\"\n \"This meticulously crafted technical documentation serves as a go-to resource for understanding the Enigma Corps system, covering everything from its architecture and network configuration to security protocols, troubleshooting, and hardware specifications. It's an invaluable reference for maintaining optimal system performance.\"\n )\n },\n {\n \"name\": \"passwords.txt\",\n \"content\": (\n \"Sensitive Password Information for Enigma Corps\\n\\n\"\n \"Admin Password: *********\\n\"\n \"Database Password: *********\\n\"\n \"Router Password: *********\\n\"\n \"WiFi Password: *********\\n\"\n \"Encryption Key: *********\\n\\n\"\n \"Warning: This file contains confidential information. Keep it secure, and refrain from sharing passwords without explicit authorization. Safeguarding this information is crucial to maintaining the security and integrity of the Enigma Corps systems.\"\n )\n },\n {\n \"name\": \"software_inventory.csv\",\n \"content\": (\n \"Software Inventory for Enigma Corps\\n\\n\"\n \"Software Name, Version, License Key\\n\"\n \"1. Enigma Security Suite, v2.0, X1Y2Z3A4-B5C6D7E8-F9G0H1I2\\n\"\n \"2. DataGuard Backup, v1.5, Y3X2W1V0-U9T8S7R6-Q5P4O3N2\\n\"\n \"3. Office Suite, v2022, Z9Z8Z7Z6-Z5Z4Z3Z2-Z1Z0Z9Z8-Z7Z6Z5\\n\"\n \"4. VPN Client, v3.1, W6W5W4W3-W2W1W0-W9W8W7-W6W5W4\\n\"\n \"5. Project Management Tool, v4.2, VV8V7V6V5-V4V3V2V1-V0V9V8V7-V6V5V4\\n\\n\"\n \"Important: This inventory is crucial for tracking and managing software across Enigma Corps systems. The provided license keys are randomized for security reasons. Handle this information responsibly, and ensure it is only accessible to authorized personnel to maintain the security and compliance of our software assets.\"\n )\n }\n ]\n self.emails = [\n # Email to Management\n {\n \"sender\": \"Markus\",\n \"subject\": \"System Maintenance Scheduled\",\n \"body\": (\n \"Dear Michael,\\n\\n\"\n \"I hope this email finds you well. We wanted to inform you that we have scheduled a system maintenance session for the upcoming weekend to ensure the optimal performance and security of our systems.\\n\\n\"\n \"Maintenance Details:\\n\"\n \"- Date: 16/12/23 - 17/12/23\\n\"\n \"- Time: 3:00pm\\n\"\n \"- Duration: 1 Hour\\n\"\n \"- Impact: No impact expected\\n\\n\"\n \"During this period, there might be temporary disruptions in certain services. Our team will be working diligently to minimize any inconvenience. 
If you have any concerns or specific considerations, please feel free to reach out to us.\\n\\n\"\n \"Thank you for your understanding and cooperation.\\n\\n\"\n \"Best regards,\\n\"\n \"IT Department\"\n )\n },\n {\n # Email to Employees\n \"sender\": \"Markus\",\n \"subject\": \"Upcoming Software Update\",\n \"body\": (\n \"Good afternoon, Kyle,\\n\\n\"\n \"We hope you're doing well. Our IT team is excited to inform you about an upcoming software update that will enhance the functionality and security of our systems. The update is scheduled for [Date] at [Time]. Please take note of the following details:\\n\\n\"\n \"- Expected Duration: Two Days\\n\"\n \"- Action Required: As this will be processed during the weekend, no action is required.\\n\"\n \"- Impact: While we anticipate minimal impact on your day-to-day activities, it's essential to be aware of any potential changes. These include: New UI to navigate, logging in or logging out issues.\\n\\n\"\n \"We recommend saving your work and logging out of your system before the update. If you encounter any issues post-update, don't hesitate to contact our IT support team for assistance.\\n\\n\"\n \"Thank you for your cooperation and understanding.\\n\\n\"\n \"Best regards,\\n\"\n \"IT Support Team\"\n )\n },\n # Email from Markus to Billy\n {\n \"sender\": \"Markus\",\n \"subject\": \"Urgent: Password Security Update Required\",\n \"body\": (\n \"Billy,\\n\\n\"\n \"I hope this email finds you well. I wanted to bring to your attention the importance of updating your current password. This is not the first time I've raised this concern, and I want to emphasize its critical nature.\\n\\n\"\n \"In recent security assessments, it has been flagged that your current password might not meet the latest security standards. To ensure the safety of your account and our overall cybersecurity, it is imperative that you change your password promptly.\\n\\n\"\n \"I understand that these reminders may seem repetitive, but they stem from a genuine concern for the security of your account and our collective responsibility in maintaining a robust cybersecurity posture.\\n\\n\"\n \"Please take a moment at your earliest convenience to update your password. If you encounter any issues or have questions, feel free to reach out. Your cooperation is greatly appreciated.\\n\\n\"\n \"Best regards,\\n\"\n \"Markus, Security Team\"\n )\n }\n\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" } ]
import msvcrt import os import pickle import sys import time import colorama import pygame from colorama import Fore, Style from components.common_functions import clear_terminal, print_slow, shop_help, help_user, connect_help, mail_help, \ system_help from conversations.calls import intro_call, first_call, second_call, third_call, fourth_call, fifth_call, sixth_call, \ markus_seen_call from conversations.minigame_calls import code_shatter_call from minigames.code_shatter_minigame import code_shatter_minigame from minigames.eye_spy_minigame import port_scanning from systems.level_1.amy.amy_system import AmySystem from systems.level_1.billy.billy_system import BillySystem from systems.level_1.cameras.camera_1 import camera_first from systems.level_1.markus.markus_system import MarkusSystem
15875
password.append(char) print('*', end='', flush=True) print() # Move to the next line return ''.join(password) def hack(system_name): global seen_markus # Find the system in the all_systems list system = next((s for s in all_systems if s['name'].lower() == system_name.lower()), None) if system: if system['level'] == player_level: # Check for CodeShatter before prompting for password if system['name'] == 'Markus' and has_item("CodeShatter"): clear_terminal() code_shatter_minigame() print_slow("Password Cracked: 735@&!//") input("Press [Enter] to continue") clear_terminal() markus_system_command_loop(markus_system) add_level(player_level) remove_from_inventory(item="CodeShatter") seen_markus = True elif system['name'] == 'Lobby Camera' and has_item("EyeSpy"): port_scanning() add_level(player_level) camera_first() else: # Prompt the user for the password print_slow("") password = getpass_star("Enter password: ") print_slow("") if password == system['password']: print_slow("") print_slow(Fore.GREEN + "Access granted!" + Style.RESET_ALL) if system['name'] == 'Amy': amy_system_command_loop(amy_system) elif system['name'] == 'Billy': billy_system_command_loop(billy_system) elif system['name'] == 'Markus': markus_system_command_loop(markus_system) add_level(player_level) seen_markus = True elif system['name'] == 'Lobby Camera': camera_first() elif system['name'] == 'Kyle': # Implement Kyle System else: # Add more conditions for other systems pass else: print_slow("") print_slow(Fore.RED + "Access denied! Incorrect password." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." + Style.RESET_ALL) def list_emails(emails): print_slow(Fore.LIGHTBLUE_EX + "\nEmails:" + Style.RESET_ALL) for i, email in enumerate(emails): print_slow(Fore.LIGHTBLUE_EX + f"\n{email['subject']} - From: {email['sender']}" + Style.RESET_ALL) def read_email(emails, subject): global has_read_email, evidence global balance email_found = False for email in emails: if email['subject'].lower() == subject.lower(): email_found = True print_slow( Fore.LIGHTBLUE_EX + f"\nFrom: {email['sender']}\nSubject: {email['subject']}\n\n{email['body']}" + Style.RESET_ALL) # Check if the email is one of the specific emails that increases evidence count if email['subject'].lower() in ["project update"]: evidence_item = 3 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) third_call() if email['subject'].lower() in ["professional development"]: evidence_item = 2 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." 
+ Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) second_call() if email['subject'].lower() == "can't stop thinking about you" and email['sender'].lower() == 'amy': evidence_item = 1 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL)
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem() billy_system = BillySystem() markus_system = MarkusSystem() bg_music_enabled = True player_level = 1 has_started_game = False # Save the game state to a file def save_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus with open('savegame.pkl', 'wb') as f: pickle.dump( (inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus), f) # Load the game state from a file def load_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus if os.path.exists('savegame.pkl'): with open('savegame.pkl', 'rb') as f: inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus = pickle.load( f) else: # If the savegame file doesn't exist, set the default values inventory = [] player_level = 1 evidence = [] has_intro_call = False has_started_game = False seen_markus = False balance = 30000 emails = [ { "sender": "Hacker's Digest", "subject": "Weekly Hacker's Digest", "body": ( "Issue #143\n\n" "Cipher,\n\n" "Welcome to the latest edition of Hacker's Digest! In this issue: \n\n" "- Unveiling the Latest Exploits\n" "- Spotlight on Cryptocurrency Security\n" "- Interview with a Grey Hat Hacker\n" "- Tool of the Week: EnigmaLink\n\n" "Don't miss out on the latest in the world of hacking and cybersecurity. Stay informed and stay secure!\n\n" "Best regards,\n" "Hacker's Digest Team" ) }, { "sender": "The Cyber Mythbuster", "subject": "Busting Cybersecurity Myths", "body": ( "Cipher,\n\n" "Heard any wild cybersecurity myths lately? This week, we're busting the craziest ones, including:\n\n" "- Using 'Password123' for Maximum Security\n" "- Cyber Ninjas and Their Stealthy VPNs\n" "- USB Drives: The Fountain of Eternal Data\n\n" "Stay myth-free and keep on hacking (responsibly)!\n\n" "Mythbustingly,\n" "The Cyber Mythbuster" ) }, { "sender": "CyberSilliness", "subject": "Where Cyber Meets Comedy", "body": ( "Welcome to the CyberSilliness Gazette\n" "Where we believe that a good laugh is the ultimate antivirus! In this week's hilarity-packed issue:\n\n" "- Cyber Jokes to Crack You Up (Without Cracking Your Passwords)\n" "- Tech Support Horror Stories: A Comedy of Errors\n" "- Chuckle Challenge: Share Your Funniest Cybersecurity Anecdote\n" "- Meet the Cyber Clowns: Our Team's Silly Security Habits Revealed\n\n" "Laughter is contagious, and so is good cybersecurity. Dive into the giggles and stay safe!\n\n" "Silly Regards,\n" "The CyberSilliness Team" ) }, { "sender": "Security Insight Weekly", "subject": "Navigating the Cybersecurity Landscape", "body": ( "Hello Cipher,\n\n" "Welcome to Security Insight Weekly, your reliable source for navigating the ever-evolving cybersecurity landscape. 
In this week's issue:\n\n" "- Threat Analysis: Understanding Recent Cybersecurity Incidents\n" "- Best Practices for Endpoint Security\n" "- Industry Spotlight: Healthcare Cybersecurity Challenges\n" "- Security Compliance Update: Staying Aligned with Regulations\n\n" "Stay informed and empowered as we delve into the serious aspects of cybersecurity. Your security is our priority.\n\n" "Best regards,\n" "The Security Insight Team" ) }, ] # New function for game settings def game_settings(): global bg_music_enabled print_slow(Fore.GREEN + "░██████╗███████╗████████╗████████╗██╗███╗░░██╗░██████╗░░██████╗") print_slow(Fore.GREEN + "██╔════╝██╔════╝╚══██╔══╝╚══██╔══╝██║████╗░██║██╔════╝░██╔════╝") print_slow(Fore.GREEN + "╚█████╗░█████╗░░░░░██║░░░░░░██║░░░██║██╔██╗██║██║░░██╗░╚█████╗░") print_slow(Fore.GREEN + "░╚═══██╗██╔══╝░░░░░██║░░░░░░██║░░░██║██║╚████║██║░░╚██╗░╚═══██╗") print_slow(Fore.GREEN + "██████╔╝███████╗░░░██║░░░░░░██║░░░██║██║░╚███║╚██████╔╝██████╔╝") print_slow(Fore.GREEN + "╚═════╝░╚══════╝░░░╚═╝░░░░░░╚═╝░░░╚═╝╚═╝░░╚══╝░╚═════╝░╚═════╝░" + Style.RESET_ALL) print_slow("") print_slow("") print_slow("") print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) print_slow( Fore.GREEN + f"| [Background Music] {'Enabled |' if bg_music_enabled else 'Disabled |'}" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Delete Savegame] |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Back to Main Menu] |" + Style.RESET_ALL) print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL) if choice.lower() == "background music": # Toggle background music bg_music_enabled = not bg_music_enabled if bg_music_enabled: pygame.mixer.music.play(-1) print_slow(Fore.GREEN + "\nBackground Music Enabled" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() else: pygame.mixer.music.stop() print_slow(Fore.RED + "\nBackground Music Disabled" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() elif choice.lower() == "delete savegame": # Delete savegame confirm = input(Fore.RED + "\nAre you sure you want to delete the savegame? (yes/no): " + Style.RESET_ALL) if confirm.lower() == "yes": try: os.remove("savegame.pkl") print_slow(Fore.GREEN + "\nSavegame Deleted" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() except FileNotFoundError: print_slow(Fore.RED + "\nSavegame not found" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() elif choice.lower() == "back" or choice.lower() == "back to main menu": # Return to Main Menu print_slow(Fore.GREEN + "\nReturning to Main Menu..." + Style.RESET_ALL) time.sleep(1) clear_terminal() else: print_slow(Fore.RED + "\nInvalid choice, please try again." 
+ Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() # Function to add an item to the inventory def add_to_inventory(item): inventory.append(item) def remove_from_inventory(item): if item in inventory: inventory.remove(item) def add_evidence(evidence_item): evidence.append(evidence_item) def has_evidence(evidence_item): return evidence_item in evidence # Prints the games title def main(): clear_terminal() colorama.init() print_slow(Fore.GREEN + "██████╗░██╗░░░░░░█████╗░░█████╗░██╗░░██╗██╗░░██╗░█████╗░████████╗" + Style.RESET_ALL) print_slow(Fore.GREEN + "██╔══██╗██║░░░░░██╔══██╗██╔══██╗██║░██╔╝██║░░██║██╔══██╗╚══██╔══╝" + Style.RESET_ALL) print_slow(Fore.GREEN + "██████╦╝██║░░░░░███████║██║░░╚═╝█████═╝░███████║███████║░░░██║░░░" + Style.RESET_ALL) print_slow(Fore.GREEN + "██╔══██╗██║░░░░░██╔══██║██║░░██╗██╔═██╗░██╔══██║██╔══██║░░░██║░░░" + Style.RESET_ALL) print_slow(Fore.GREEN + "██████╦╝███████╗██║░░██║╚█████╔╝██║░╚██╗██║░░██║██║░░██║░░░██║░░░" + Style.RESET_ALL) print_slow(Fore.GREEN + "╚═════╝░╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝░░░╚═╝░░░" + Style.RESET_ALL) # Pause for 2 seconds before clearing the console time.sleep(5) # Clear the console clear_terminal() # Main menu loop while True: print_slow(Fore.GREEN + "███╗░░░███╗░█████╗░██╗███╗░░██╗  ███╗░░░███╗███████╗███╗░░██╗██╗░░░██╗") print_slow(Fore.GREEN + "████╗░████║██╔══██╗██║████╗░██║  ████╗░████║██╔════╝████╗░██║██║░░░██║") print_slow(Fore.GREEN + "██╔████╔██║███████║██║██╔██╗██║  ██╔████╔██║█████╗░░██╔██╗██║██║░░░██║") print_slow(Fore.GREEN + "██║╚██╔╝██║██╔══██║██║██║╚████║  ██║╚██╔╝██║██╔══╝░░██║╚████║██║░░░██║") print_slow(Fore.GREEN + "██║░╚═╝░██║██║░░██║██║██║░╚███║  ██║░╚═╝░██║███████╗██║░╚███║╚██████╔╝") print_slow( Fore.GREEN + "╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝╚═╝░░╚══╝  ╚═╝░░░░░╚═╝╚══════╝╚═╝░░╚══╝░╚═════╝░" + Style.RESET_ALL) print_slow("") print_slow("") print_slow("") print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Start] Start the game |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Options] Change the settings |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Exit] Exit the game |" + Style.RESET_ALL) print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL) # Start the game if choice.lower() == "start": load_game() start_game() # Open game settings elif choice.lower() == "options": clear_terminal() game_settings() # Exit the game elif choice.lower() == "exit": print_slow(Fore.GREEN + "\nExiting..." + Style.RESET_ALL) pygame.mixer.music.stop() sys.exit() else: print_slow(Fore.RED + "\nInvalid choice, please try again." 
+ Style.RESET_ALL) time.sleep(2) clear_terminal() # Function to get the user's balance def get_balance(): return balance # Function to add money to the user's balance def add_money(amount): global balance balance += amount # Function to subtract money from the user's balance def subtract_money(amount): global balance balance -= amount def add_level(level): global player_level player_level += level # Function to print the user's balance def print_balance(): print_slow(f"Your current balance is: £{get_balance()}") # Function to read files and marks files as evidence def read_file(file_content, file_name): global has_read_file, evidence global balance # Print the file content print_slow(Fore.LIGHTBLUE_EX + f"\n{file_name}:\n\n{file_content}" + Style.RESET_ALL) print_slow("") # Check if the file is one of the specific files that increases evidence count if file_name.lower() in ["employee_performance_review.txt"]: evidence_item = 4 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) fourth_call() if file_name.lower() in ["meeting_minutes.txt"]: evidence_item = 5 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) fifth_call() # Add more file names here as needed # Add money to balance based on the file name if file_name.lower() == "employee_performance_review.txt": balance += 30 elif file_name.lower() == "meeting_minutes.txt": balance += 50 # List of available upgrades upgrades = [ {"name": "EnigmaLink", "description": "Application required to connect to Enigma Corps network.", "price": 100}, {"name": "CodeShatter", "description": "A powerful password breaker that can crack even the strongest passwords.", "price": 250}, {"name": "EyeSpy", "description": "A privacy breaker to gain access to the smallest of cameras.", "price": 500}, {"name": "Rift", "description": "Break the barrier between the Server and Network.", "price": 800} ] # Function to display the shop def shop(): clear_terminal() print_slow(Fore.YELLOW + r''' ██╗░░██╗░█████╗░░█████╗░██╗░░██╗███████╗██████╗░  ███╗░░░███╗░█████╗░██████╗░██╗░░██╗███████╗████████╗ ██║░░██║██╔══██╗██╔══██╗██║░██╔╝██╔════╝██╔══██╗  ████╗░████║██╔══██╗██╔══██╗██║░██╔╝██╔════╝╚══██╔══╝ ███████║███████║██║░░╚═╝█████═╝░█████╗░░██████╔╝  ██╔████╔██║███████║██████╔╝█████═╝░█████╗░░░░░██║░░░ ██╔══██║██╔══██║██║░░██╗██╔═██╗░██╔══╝░░██╔══██╗  ██║╚██╔╝██║██╔══██║██╔══██╗██╔═██╗░██╔══╝░░░░░██║░░░ ██║░░██║██║░░██║╚█████╔╝██║░╚██╗███████╗██║░░██║  ██║░╚═╝░██║██║░░██║██║░░██║██║░╚██╗███████╗░░░██║░░░ ╚═╝░░╚═╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝╚══════╝╚═╝░░╚═╝  ╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝╚══════╝░░░╚═╝░░░''' + Style.RESET_ALL) print_slow(Fore.YELLOW + "\nWelcome to the Hacker's Market!" 
+ Style.RESET_ALL) print_slow("") print_slow(Fore.YELLOW + "Here you can buy upgrades to improve your hacking abilities.\n" + Style.RESET_ALL) while True: # Display the list of available upgrades for i, upgrade in enumerate(upgrades): print_slow( Fore.YELLOW + f"\n{upgrade['name']} - {upgrade['description']} - £{upgrade['price']}" + Style.RESET_ALL) # Get the user's choice command = input(Fore.YELLOW + "\n> " + Style.RESET_ALL) # Buy the chosen upgrade if command.lower() == 'exit': print_slow(Fore.YELLOW + "\nExiting Hacker's Market" + Style.RESET_ALL) time.sleep(1) clear_terminal() start_game() elif command.lower() == 'help': shop_help() elif command.lower().startswith('buy '): upgrade_name = command[4:] # [4:] removes first 4 characters if has_item('EnigmaLink'): if upgrade_name.lower() == 'enigmalink': print_slow("") print_slow(Fore.RED + "Sold Out" + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() else: for upgrade in upgrades: if upgrade_name.lower() == upgrade['name'].lower(): if get_balance() >= upgrade['price']: print_slow("") print_slow( Fore.GREEN + f"You have successfully purchased {upgrade['name']} for ${upgrade['price']}!" + Style.RESET_ALL) subtract_money(upgrade['price']) print_slow("") print_balance() add_to_inventory(upgrade['name']) time.sleep(2) clear_terminal() # Check if the purchased upgrade is CodeShatter if upgrade_name.lower() == 'codeshatter': print_slow("") print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) code_shatter_call() shop() else: clear_terminal() shop() else: print_slow( Fore.RED + "You don't have enough money to buy this upgrade." + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() else: print_slow(Fore.RED + "Invalid choice, please try again." + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() else: for upgrade in upgrades: if upgrade_name.lower() == upgrade['name'].lower(): if get_balance() >= upgrade['price']: print_slow("") print_slow( Fore.GREEN + f"You have successfully purchased {upgrade['name']} for ${upgrade['price']}!" + Style.RESET_ALL) subtract_money(upgrade['price']) print_slow("") print_balance() add_to_inventory(upgrade['name']) time.sleep(2) clear_terminal() shop() else: print_slow( Fore.RED + "You don't have enough money to buy this upgrade." + Style.RESET_ALL) shop() else: print_slow(Fore.RED + "Invalid choice, please try again." + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() # Function to start the game def start_game(): global has_intro_call, has_started_game, seen_markus if has_intro_call: clear_terminal() pass else: print_slow("\nStarting game...") time.sleep(1) print_slow("\nLoading assets...") time.sleep(1) clear_terminal() print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) intro_call() has_intro_call = True has_started_game = True print_slow(Fore.MAGENTA + "\nHint: Type 'help' to get a list of available commands." + Style.RESET_ALL) pass if seen_markus: print_slow(Fore.GREEN + "Incoming Call..." 
+ Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) markus_seen_call() else: pass # Game command loop command = input(Fore.GREEN + "> " + Style.RESET_ALL) # Connect to the network if command.lower() == "connect": connect() # Access the mail system elif command.lower() == "mail": mail() # Display help message elif command.lower() == "help": help_user() # Check balance elif command.lower() == "balance": print_balance() # Enter shop elif command.lower() == "shop": shop() # Clear terminal elif command.lower() == "clear": clear_terminal() # Return to the main menu elif command.lower() == "exit": print_slow("Returning to Main Menu...") time.sleep(1) main() else: print_slow("Invalid command, please try again.") time.sleep(1) clear_terminal() start_game() # Save the game state save_game() # Function to check if an item is in the inventory def has_item(item): return item in inventory def scan(): print_slow("") print_slow(Fore.YELLOW + "Scanning network..." + Style.RESET_ALL) time.sleep(2) print_slow("") print_slow(Fore.YELLOW + "\nAvailable Systems:" + Style.RESET_ALL) print_slow("") for system in all_systems: if system['level'] == player_level: print_slow("") print_slow(f"{system['name']} ({system['type']})") print_slow("") def getpass_star(prompt="Password: "): print(prompt, end='', flush=True) password = [] while True: char = msvcrt.getch().decode('utf-8') if char == '\r' or char == '\n': break elif char == '\b': # Backspace if password: password.pop() print('\b \b', end='', flush=True) else: password.append(char) print('*', end='', flush=True) print() # Move to the next line return ''.join(password) def hack(system_name): global seen_markus # Find the system in the all_systems list system = next((s for s in all_systems if s['name'].lower() == system_name.lower()), None) if system: if system['level'] == player_level: # Check for CodeShatter before prompting for password if system['name'] == 'Markus' and has_item("CodeShatter"): clear_terminal() code_shatter_minigame() print_slow("Password Cracked: 735@&!//") input("Press [Enter] to continue") clear_terminal() markus_system_command_loop(markus_system) add_level(player_level) remove_from_inventory(item="CodeShatter") seen_markus = True elif system['name'] == 'Lobby Camera' and has_item("EyeSpy"): port_scanning() add_level(player_level) camera_first() else: # Prompt the user for the password print_slow("") password = getpass_star("Enter password: ") print_slow("") if password == system['password']: print_slow("") print_slow(Fore.GREEN + "Access granted!" + Style.RESET_ALL) if system['name'] == 'Amy': amy_system_command_loop(amy_system) elif system['name'] == 'Billy': billy_system_command_loop(billy_system) elif system['name'] == 'Markus': markus_system_command_loop(markus_system) add_level(player_level) seen_markus = True elif system['name'] == 'Lobby Camera': camera_first() elif system['name'] == 'Kyle': # Implement Kyle System else: # Add more conditions for other systems pass else: print_slow("") print_slow(Fore.RED + "Access denied! Incorrect password." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." 
+ Style.RESET_ALL) def list_emails(emails): print_slow(Fore.LIGHTBLUE_EX + "\nEmails:" + Style.RESET_ALL) for i, email in enumerate(emails): print_slow(Fore.LIGHTBLUE_EX + f"\n{email['subject']} - From: {email['sender']}" + Style.RESET_ALL) def read_email(emails, subject): global has_read_email, evidence global balance email_found = False for email in emails: if email['subject'].lower() == subject.lower(): email_found = True print_slow( Fore.LIGHTBLUE_EX + f"\nFrom: {email['sender']}\nSubject: {email['subject']}\n\n{email['body']}" + Style.RESET_ALL) # Check if the email is one of the specific emails that increases evidence count if email['subject'].lower() in ["project update"]: evidence_item = 3 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) third_call() if email['subject'].lower() in ["professional development"]: evidence_item = 2 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) second_call() if email['subject'].lower() == "can't stop thinking about you" and email['sender'].lower() == 'amy': evidence_item = 1 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL)
first_call()
8
2023-11-06 09:52:13+00:00
24k
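Read together, the values of the record above (the retrieved context snippets, the import statement, the cropped code, the held-out next line first_call(), and the gold snippet index 8) appear to form one next-line code-completion example. Below is a minimal sketch, under stated assumptions, of how such a record might be assembled into a prompt/target pair: the field names mirror the record above, but build_next_line_example is a hypothetical helper, the prompt layout is illustrative, and the assumption that the gold index selects an entry of the context list is mine, not an official loader.

def build_next_line_example(record: dict) -> tuple:
    """Assemble a (prompt, target) pair from a single record (illustrative sketch only)."""
    # Assumption: gold_snippet_index points into the record's context list and
    # selects the snippet whose definition the held-out next line depends on.
    gold = record["context"][record["gold_snippet_index"]]
    context_block = "# " + gold["path"] + "\n" + gold["snippet"] + "\n\n"

    # The model is asked to continue cropped_code; the reference continuation
    # is the record's next_line field.
    prompt = context_block + record["import_statement"] + "\n" + record["cropped_code"]
    target = record["next_line"]
    return prompt, target

# Hypothetical usage with the record above: target would be "first_call()".
# prompt, target = build_next_line_example(record)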
ziqi-zhang/TAOISM
python/test/test_relu.py
[ { "identifier": "register_layer", "path": "python/common_net.py", "snippet": "def register_layer(layer, name):\n layer.register_forward_hook(hooking_layer(name))\n layer.register_backward_hook(hooking_layer_backward(name))\n layer_names.append(name)" }, { "identifier": "register_weight_layer", "path": "python/common_net.py", "snippet": "def register_weight_layer(layer, name):\n register_layer(layer, name)\n layer_weight[name] = layer.weight\n linear_layer_names.append(name)" }, { "identifier": "get_layer_weight", "path": "python/common_net.py", "snippet": "def get_layer_weight(name):\n return layer_weight[name]" }, { "identifier": "get_layer_input", "path": "python/common_net.py", "snippet": "def get_layer_input(name):\n return layer_input[name]" }, { "identifier": "get_layer_weight_grad", "path": "python/common_net.py", "snippet": "def get_layer_weight_grad(name):\n return layer_weight[name].grad.data" }, { "identifier": "get_layer_output", "path": "python/common_net.py", "snippet": "def get_layer_output(name):\n return layer_output[name]" }, { "identifier": "get_layer_output_grad", "path": "python/common_net.py", "snippet": "def get_layer_output_grad(name):\n return layer_output_grad[name]" }, { "identifier": "get_layer_input_grad", "path": "python/common_net.py", "snippet": "def get_layer_input_grad(name):\n return layer_input_grad[name]" }, { "identifier": "GlobalTensor", "path": "python/enclave_interfaces.py", "snippet": "class GlobalTensor(object):\n cpu_tensor = {}\n gpu_tensors = {}\n encrypted_tensors = {}\n LinkedTags = {}\n InverseLinkedTags = {}\n IsInitEnclaveTensor = {}\n EnclaveInterface = None\n eid = None\n is_init_global_tensor = False\n\n @staticmethod\n def init():\n if GlobalTensor.is_init_global_tensor:\n return\n GlobalTensor.EnclaveInterface = EnclaveInterface()\n GlobalTensor.EnclaveInterface.init_enclave()\n GlobalTensor.is_init_global_tensor = True\n\n @staticmethod\n def destroy():\n GlobalTensor.EnclaveInterface.destroy_enclave()\n\n GlobalTensor.cpu_tensor = {}\n GlobalTensor.gpu_tensors = {}\n GlobalTensor.encrypted_tensors = {}\n GlobalTensor.LinkedTags = {}\n GlobalTensor.InverseLinkedTags = {}\n GlobalTensor.IsInitEnclaveTensor = {}\n GlobalTensor.EnclaveInterface = None\n GlobalTensor.eid = None\n GlobalTensor.is_init_global_tensor = False\n\n\n @staticmethod\n def get_eid():\n return GlobalTensor.EnclaveInterface.get_eid()\n\n @staticmethod\n def link_tags(tag1, tag2):\n if tag1 == tag2:\n return\n\n friends = []\n\n def add_friends(tag):\n nonlocal friends\n if tag in GlobalTensor.LinkedTags:\n its_leader_tag = GlobalTensor.LinkedTags[tag]\n if its_leader_tag in GlobalTensor.InverseLinkedTags:\n friends += GlobalTensor.InverseLinkedTags.pop(its_leader_tag)\n else:\n friends += [tag]\n\n add_friends(tag1)\n add_friends(tag2)\n leader_tag = min(friends)\n\n GlobalTensor.InverseLinkedTags[leader_tag] = friends\n for t in friends:\n if t in GlobalTensor.IsInitEnclaveTensor:\n raise ValueError(\"Tags must linked before tensor initialization\")\n GlobalTensor.LinkedTags[t] = leader_tag\n\n @staticmethod\n def get_remapped_tags(tag):\n return GlobalTensor.LinkedTags[tag] if tag in GlobalTensor.LinkedTags else tag\n\n @staticmethod\n def set_cpu(tag, tensor):\n GlobalTensor.cpu_tensor[tag] = tensor.to(torch.device(\"cpu\"))\n\n @staticmethod\n def set_gpu(tag, tensor):\n GlobalTensor.gpu_tensors[tag] = tensor\n\n @staticmethod\n def set_encrypted(tag, tensor):\n GlobalTensor.encrypted_tensors[tag] = tensor\n\n @staticmethod\n def get_cpu(tag):\n return 
GlobalTensor.cpu_tensor[tag]\n\n @staticmethod\n def get_gpu(tag):\n return GlobalTensor.gpu_tensors[tag]\n\n @staticmethod\n def get_encryption(tag):\n return GlobalTensor.encrypted_tensors[tag]\n\n @staticmethod\n def init_enclave_tensor(tag, size):\n size = list(size)\n if len(size) < 4:\n size = [1] * (4 - len(size)) + size\n remapped_tag = GlobalTensor.get_remapped_tags(tag)\n if remapped_tag in GlobalTensor.IsInitEnclaveTensor:\n return\n else:\n GlobalTensor.IsInitEnclaveTensor[remapped_tag] = True\n eid = GlobalTensor.get_eid()\n GlobalTensor.EnclaveInterface.lib.InitTensor(eid, remapped_tag, size[0], size[1], size[2], size[3])\n\n @staticmethod\n def init_encrypted_tensor(tag, shape):\n GlobalTensor.encrypted_tensors[GlobalTensor.get_remapped_tags(tag)] = \\\n GlobalTensor.EnclaveInterface.create_encrypt_torch(shape)" }, { "identifier": "SecretBatchNorm2dLayer", "path": "python/layers/batch_norm_2d.py", "snippet": "class SecretBatchNorm2dLayer(SecretActivationLayer):\n # https://pytorch.org/docs/stable/nn.html#batchnorm2d\n\n BatchSize = None\n NumChannel = None\n ImgH = None\n ImgW = None\n WeightShape = None\n def __init__(\n self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False, merge_own_tensors=False\n ):\n \n super().__init__(\n sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next, merge_own_tensors\n )\n \n self.ForwardFuncName = \"BatchNorm2d\"\n self.BackwardFuncName = \"DerBatchNorm2d\"\n self.PlainFunc = torch.nn.BatchNorm2d\n self.IsAffine = True\n self.momentum = 0.1\n self.IsCumulative = (self.momentum is None)\n self.epsilon = 1e-5\n\n if EnclaveMode is ExecutionModeOptions.CPU or EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = torch.nn.BatchNorm2d\n # if self.is_enclave_mode:\n # self.StoreInEnclave = True\n # else:\n # self.ForwardFunc = torch.nn.BatchNorm2d\n # self.StoreInEnclave = False\n \n\n def init_shape(self):\n self.InputShape = self.PrevLayer.get_output_shape()\n self.OutputShape = self.InputShape\n self.BatchSize, self.NumChannel, self.ImgH, self.ImgW = self.InputShape\n self.WeightShape = [self.NumChannel]\n self.LearnableParamsList = [\n LearnableParamTuple(dw_name=\"DerWeight\", w_name=\"weight\", shape=self.WeightShape),\n LearnableParamTuple(dw_name=\"DerBias\", w_name=\"bias\", shape=self.WeightShape),\n ]\n \n\n # def init(self, start_enclave=True):\n \n # if self.sid == 2:\n # return\n # TensorLoader.init(self, start_enclave)\n\n # if self.is_enclave_mode:\n # self.PlainFunc = self.PlainFunc(self.InputShape[1])\n # self.PlainFunc.eval()\n # self.get_cpu(\"weight\").data.copy_(self.PlainFunc.weight.data)\n # self.get_cpu(\"bias\").data.copy_(self.PlainFunc.bias.data)\n # self.get_cpu(\"RunMean\").data.copy_(self.PlainFunc.running_mean.data)\n # # inject sqrt(running_var) instead of running_var for precision\n # self.get_cpu(\"RunVar\").data.copy_(self.PlainFunc.running_var.data)\n # self.transfer_cpu_to_enclave(\"weight\")\n # self.transfer_cpu_to_enclave(\"bias\")\n # self.transfer_cpu_to_enclave(\"RunMean\")\n # self.transfer_cpu_to_enclave(\"RunVar\")\n # self.batchnorm_init(\n # self.LayerName,\n # \"input\", \"output\", \"weight\", \"bias\",\n # \"DerInput\", \"DerOutput\", \"DerWeight\", \"DerBias\",\n # \"RunMean\", \"RunVar\", \"CurMean\", \"CurVar\",\n # \"mu\",\n # self.BatchSize, self.NumChannel, self.ImgH, self.ImgW,\n # int(self.IsAffine), int(self.IsCumulative), self.momentum, self.epsilon)\n # else:\n 
# self.ForwardFunc = self.ForwardFunc(self.InputShape[1])\n # self.PlainFunc = self.PlainFunc(self.InputShape[1])\n # self.PlainFunc.eval()\n # self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n # self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n # self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)\n # self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)\n # self.set_cpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n # self.set_cpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n # self.set_cpu(\"RunMean\", self.ForwardFunc.running_mean.data)\n # self.set_cpu(\"RunVar\", self.ForwardFunc.running_var.data)\n # self.ForwardFunc.eval()\n\n def init(self, start_enclave=True):\n # if self.LayerName == \"Layer3.10.proxies.0.bn2\":\n # st()\n TensorLoader.init(self, start_enclave)\n\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.PlainFunc = self.PlainFunc(self.InputShape[1])\n self.PlainFunc.eval()\n self.get_cpu(\"weight\").data.copy_(self.PlainFunc.weight.data)\n self.get_cpu(\"bias\").data.copy_(self.PlainFunc.bias.data)\n self.get_cpu(\"RunMean\").data.copy_(self.PlainFunc.running_mean.data)\n # inject sqrt(running_var) instead of running_var for precision\n self.get_cpu(\"RunVar\").data.copy_(self.PlainFunc.running_var.data)\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.transfer_cpu_to_enclave(\"RunMean\")\n self.transfer_cpu_to_enclave(\"RunVar\")\n self.batchnorm_init(\n self.LayerName,\n \"input\", \"output\", \"weight\", \"bias\",\n # \"DerInput\", \"DerOutput\", \"DerWeight\", \"DerBias\",\n \"RunMean\", \"RunVar\", \"CurMean\", \"CurVar\",\n \"mu\",\n self.BatchSize, self.NumChannel, self.ImgH, self.ImgW,\n int(self.IsAffine), int(self.IsCumulative), self.momentum, self.epsilon)\n elif self.EnclaveMode is ExecutionModeOptions.CPU:\n self.ForwardFunc = self.ForwardFunc(self.InputShape[1])\n self.PlainFunc = self.PlainFunc(self.InputShape[1])\n self.PlainFunc.eval()\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)\n self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)\n self.set_cpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_cpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n self.set_cpu(\"RunMean\", self.ForwardFunc.running_mean.data)\n self.set_cpu(\"RunVar\", self.ForwardFunc.running_var.data)\n self.ForwardFunc.eval()\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = self.ForwardFunc(self.InputShape[1])\n self.PlainFunc = self.PlainFunc(self.InputShape[1])\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)\n self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)\n self.set_gpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_gpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n self.set_gpu(\"RunMean\", self.ForwardFunc.running_mean.data)\n self.set_gpu(\"RunVar\", self.ForwardFunc.running_var.data)\n self.PlainFunc.eval()\n self.ForwardFunc.cuda().eval()\n\n # def inject_params(self, params):\n # if self.sid == -2:\n # raise ValueError(\"S2 has no learnable parameters for injection\")\n # 
self.get_cpu(\"weight\").copy_(params.weight.data)\n # self.get_cpu(\"bias\").copy_(params.bias.data)\n # self.get_cpu(\"RunMean\").copy_(params.running_mean.data)\n # # inject sqrt(running_var) instead of running_var for precision\n # self.get_cpu(\"RunVar\").copy_(params.running_var.data)\n # if self.is_enclave_mode:\n # self.transfer_cpu_to_enclave(\"weight\")\n # self.transfer_cpu_to_enclave(\"bias\")\n # self.transfer_cpu_to_enclave(\"RunMean\")\n # self.transfer_cpu_to_enclave(\"RunVar\")\n\n def inject_params(self, params):\n if self.sid == -2:\n raise ValueError(\"S2 has no learnable parameters for injection\")\n if self.EnclaveMode in [ExecutionModeOptions.CPU, ExecutionModeOptions.Enclave]: \n self.get_cpu(\"weight\").copy_(params.weight.data)\n self.get_cpu(\"bias\").copy_(params.bias.data)\n self.get_cpu(\"RunMean\").copy_(params.running_mean.data)\n self.get_cpu(\"RunVar\").copy_(params.running_var.data)\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.transfer_cpu_to_enclave(\"RunMean\")\n self.transfer_cpu_to_enclave(\"RunVar\")\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.get_gpu(\"weight\").copy_(params.weight.data)\n self.get_gpu(\"bias\").copy_(params.bias.data)\n self.get_gpu(\"RunMean\").copy_(params.running_mean.data)\n self.get_gpu(\"RunVar\").copy_(params.running_var.data)\n\n def reset_plain_bn(self):\n # module = torch.BatchNorm2d()\n self.get_cpu(\"weight\").copy_(torch.ones(self.InputShape[1]))\n self.get_cpu(\"bias\").copy_(torch.zeros(self.InputShape[1]))\n self.get_cpu(\"RunMean\").copy_(torch.zeros(self.InputShape[1]))\n self.get_cpu(\"RunVar\").copy_(torch.ones(self.InputShape[1]))\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.transfer_cpu_to_enclave(\"RunMean\")\n self.transfer_cpu_to_enclave(\"RunVar\")\n\n\n def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:\n raise NotImplementedError\n if self.sid == -2:\n raise ValueError(\"S2 has no learnable parameters for injection\")\n self.make_sure_cpu_is_latest(\"weight\")\n self.make_sure_cpu_is_latest(\"bias\")\n plain_layer.weight.data.copy_(self.get_cpu(\"weight\"))\n plain_layer.bias.data.copy_(self.get_cpu(\"bias\"))\n plain_layer.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n plain_layer.running_var.data.copy_(self.get_cpu(\"RunVar\"))\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n self.tensor_name_list = {}\n return\n\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n NeededTensorNames = [\n (\"input\", self.InputShape, None),\n # (\"DerInput\", self.InputShape, None),\n (\"output\", self.OutputShape, None),\n # (\"DerOutput\", self.OutputShape, None),\n (\"weight\", self.WeightShape, None),\n # (\"DerWeight\", self.WeightShape, None),\n (\"bias\", self.WeightShape, None),\n # (\"DerBias\", self.WeightShape, None),\n (\"RunMean\", self.WeightShape, None),\n (\"CurMean\", self.WeightShape, None),\n (\"RunVar\", self.WeightShape, None),\n (\"CurVar\", self.WeightShape, None),\n (\"mu\", self.InputShape, None),\n ]\n else:\n NeededTensorNames = [\n (\"output\", self.OutputShape, None),\n # (\"DerInput\", self.InputShape, None),\n (\"input\", self.InputShape, None),\n (\"weight\", self.WeightShape, None),\n # (\"DerWeight\", self.WeightShape, None),\n (\"bias\", self.WeightShape, None),\n # 
(\"DerBias\", self.WeightShape, None),\n # (\"DerOutput\", self.OutputShape, None)\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n # def forward(self):\n # if self.sid == 2:\n # return\n # with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n # if self.is_enclave_mode:\n # self.forward_tensor_transfer()\n # self.batchnorm_forward(self.LayerName, int(False))\n # else:\n # self.forward_tensor_transfer()\n # self.requires_grad_on_cpu(\"input\")\n # self.ForwardFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n # self.ForwardFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n # self.ForwardFunc.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n # # running_var of PlainFunc is ^2 of that in the enclave\n # enclave_running_var = self.get_cpu(\"RunVar\")\n # self.ForwardFunc.running_var.data.copy_(enclave_running_var)\n # self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n\n def forward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n # if self.LayerName == \"Layer2.0.downsample.bn\":\n # st()\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} batchnorm_forward\", verbose_level=VerboseLevel.LAYER):\n self.batchnorm_forward(self.LayerName, int(False))\n elif self.EnclaveMode == ExecutionModeOptions.CPU:\n self.forward_tensor_transfer()\n self.ForwardFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n self.ForwardFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.ForwardFunc.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n # running_var of PlainFunc is ^2 of that in the enclave\n enclave_running_var = self.get_cpu(\"RunVar\")\n self.ForwardFunc.running_var.data.copy_(enclave_running_var)\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n elif self.EnclaveMode == ExecutionModeOptions.GPU:\n self.forward_tensor_transfer()\n self.ForwardFunc.bias.data.copy_(self.get_gpu(\"bias\"))\n self.ForwardFunc.weight.data.copy_(self.get_gpu(\"weight\"))\n self.ForwardFunc.running_mean.data.copy_(self.get_gpu(\"RunMean\"))\n # running_var of PlainFunc is ^2 of that in the enclave\n enclave_running_var = self.get_gpu(\"RunVar\")\n self.ForwardFunc.running_var.data.copy_(enclave_running_var)\n # st()\n # print(self.get_gpu(\"input\")[0,0,0])\n self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\").type(SecretConfig.dtypeForCpuOp)))\n\n def backward(self):\n raise NotImplementedError\n if self.sid == 2:\n return\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Backward\", verbose_level=VerboseLevel.LAYER):\n if self.is_enclave_mode:\n self.backward_tensor_transfer()\n self.batchnorm_backward(self.LayerName)\n else:\n self.backward_tensor_transfer()\n BackwardInput, BackwardWeight, BackwardBias = self.get_cpu(\"output\").grad_fn(self.get_cpu(\"DerOutput\"))\n self.set_cpu(\"DerInput\", BackwardInput.data)\n self.set_cpu(\"DerWeight\", BackwardWeight.data)\n self.set_cpu(\"DerBias\", BackwardBias.data)\n if list(self.get_cpu(\"DerWeight\").shape) != self.WeightShape:\n real_shape = self.get_cpu(\"DerWeight\").shape\n ideal_shape = self.WeightShape\n raise ValueError(\n f\"DerWeight is not of shape self.AffineShape: real: {real_shape}, ideal: {ideal_shape}\")\n if list(self.get_cpu(\"DerBias\").shape) != self.WeightShape:\n raise 
ValueError(\"DerBias is not of shape self.AffineShape\")\n\n def plain_forward(self, NeedBackward=False):\n if self.sid == 2:\n return\n if self.EnclaveMode in [ExecutionModeOptions.Enclave, ExecutionModeOptions.GPU]:\n self.make_sure_cpu_is_latest(\"input\")\n self.make_sure_cpu_is_latest(\"bias\")\n self.make_sure_cpu_is_latest(\"weight\")\n self.requires_grad_on_cpu(\"input\")\n self.PlainFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n self.PlainFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.PlainFunc.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n # self.PlainFunc.running_var.data.copy_(self.get_cpu(\"RunVar\"))\n # running_var of PlainFunc is ^2 of that in the enclave\n enclave_running_var = self.get_cpu(\"RunVar\")\n self.PlainFunc.running_var.data.copy_(enclave_running_var)\n else:\n self.make_sure_cpu_is_latest(\"input\")\n self.requires_grad_on_cpu(\"input\")\n\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n torch.set_num_threads(1)\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"))\n torch.set_num_threads(4)\n\n def plain_backward(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"DerOutput\")\n GradFunction = self.PlainForwardResult.grad_fn\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainBackward\"):\n torch.set_num_threads(1)\n self.PlainBackwardResult = GradFunction(self.get_cpu(\"DerOutput\"))\n torch.set_num_threads(4)\n\n def show_plain_error(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=True)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n if self.PlainBackwardResult is None:\n return\n if self.is_enclave_mode:\n self.make_sure_cpu_is_latest(\"DerInput\")\n self.make_sure_cpu_is_latest(\"DerWeight\")\n self.make_sure_cpu_is_latest(\"DerBias\")\n else:\n self.make_sure_cpu_is_latest(\"DerInput\")\n BackwardInput, BackwardWeight, BackwardBias = self.PlainBackwardResult\n err_input = compare_expected_actual(BackwardInput, self.get_cpu(\"DerInput\"), show_where_err=False, get_relative=True)\n err_weight = compare_expected_actual(BackwardWeight, self.get_cpu(\"DerWeight\"), show_where_err=False,\n get_relative=True)\n err_bias = compare_expected_actual(BackwardBias, self.get_cpu(\"DerBias\"))\n print(f\"S{self.sid}: {self.LayerName} Backward Error input: {err_input}, weight {err_weight}, bias: {err_bias}\")\n\n def show_plain_error_forward(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=False, show_values=False)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")" }, { "identifier": "SecretFlattenLayer", "path": "python/layers/flatten.py", "snippet": "class SecretFlattenLayer(SecretNonlinearLayer):\n batch_size = None\n n_features = None\n input_shape = None\n output_shape = None\n\n def __init__(\n self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.StoreInEnclave = False\n self.ForwardFuncName = \"Flatten\"\n self.BackwardFuncName = \"DerFlatten\"\n\n\n def init(self, start_enclave=True):\n super().init(start_enclave)\n self.ForwardFunc = lambda x: x.view(-1, self.n_features)\n self.PlainFunc = 
lambda x: x.view(-1, self.n_features)\n\n def init_shape(self):\n self.input_shape = self.PrevLayer.get_output_shape()\n if len(self.input_shape) != 4:\n return ValueError(\"The dimension of the tensor form prev. layer has to be 4D.\")\n\n self.batch_size = self.input_shape[0]\n self.n_features = self.input_shape[1] * self.input_shape[2] * self.input_shape[3]\n self.output_shape = [self.batch_size, self.n_features]\n\n def get_output_shape(self):\n return self.output_shape\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n self.tensor_name_list = {}\n return\n\n NeededTensorNames = [(\"output\", self.output_shape, None),\n (\"input\", self.input_shape, None),\n (\"DerInput\", self.input_shape, None),\n (\"DerOutput\", self.output_shape, None)\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n def forward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n self.transfer_enclave_to_cpu(\"input\")\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n self.transfer_cpu_to_enclave(\"output\")\n elif self.EnclaveMode == ExecutionModeOptions.CPU:\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n elif self.EnclaveMode == ExecutionModeOptions.GPU:\n self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\")))\n\n # self.forward_tensor_transfer()\n # self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n\n def backward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Backward\", verbose_level=VerboseLevel.LAYER):\n self.backward_tensor_transfer()\n self.set_cpu(\"DerInput\", self.get_cpu(\"DerOutput\").view(self.input_shape))\n\n def plain_forward(self, NeedBackward=False):\n self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"))\n\n def plain_backward(self):\n self.make_sure_cpu_is_latest(\"DerOutput\")\n GradFunction = self.PlainForwardResult.grad_fn\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainBackward\"):\n self.PlainBackwardResult = GradFunction(self.get_cpu(\"DerOutput\"))\n\n def show_plain_error(self):\n if self.StoreInEnclave:\n self.transfer_enclave_to_cpu(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"))\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n if self.PlainBackwardResult is None:\n return\n err = compare_expected_actual(self.PlainBackwardResult, self.get_cpu(\"DerInput\"), get_relative=True)\n print(f\"S{self.sid}: {self.LayerName} Backward Error {err}\")" }, { "identifier": "SecretInputLayer", "path": "python/layers/input.py", "snippet": "class SecretInputLayer(SecretNonlinearLayer):\n shape = None\n\n def __init__(\n self, sid, LayerName, input_shape, EnclaveMode, link_prev=True, link_next=True, \n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.shape = input_shape\n\n def link_tensors(self):\n gt.link_tags(self.get_tag(\"input\", remap=False), self.get_tag(\"output\", remap=False))\n super().link_tensors()\n\n def init_shape(self):\n return\n\n def set_input(self, tensor):\n self.set_tensor_cpu_gpu_enclave(\"input\", tensor)\n\n def get_output_shape(self):\n 
return self.shape\n\n def forward(self):\n return\n\n def backward(self):\n return\n\n def plain_forward(self):\n return\n\n def plain_backward(self):\n return\n\n def show_plain_error(self):\n return\n\n def print_connection_info(self):\n print(f\"{self.LayerName:30} shape{self.shape} output {self.NextLayer.LayerName:30}\")" }, { "identifier": "SecretMaxpool2dLayer", "path": "python/layers/maxpool2d.py", "snippet": "class SecretMaxpool2dLayer(SecretActivationLayer):\n def __init__(\n self, sid, LayerName, EnclaveMode, filter_hw, stride, padding, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.ForwardFuncName = \"Maxpool2d\"\n self.BackwardFuncName = \"DerMaxpool2d\"\n self.filter_hw = filter_hw\n self.startmaxpool = False\n self.PlainFunc = torch.nn.MaxPool2d\n self.maxpoolpadding = padding\n self.stride = stride\n self.STORE_CHUNK_ELEM = 401408\n\n self.ForwardFunc = torch.nn.MaxPool2d\n\n if EnclaveMode == ExecutionModeOptions.Enclave :\n self.ForwardFunc = self.maxpoolfunc\n self.BackwardFunc = self.maxpoolbackfunc\n else:\n self.ForwardFunc = torch.nn.MaxPool2d\n\n def init_shape(self):\n self.InputShape = self.PrevLayer.get_output_shape()\n if len(self.InputShape) != 4:\n raise ValueError(\"Maxpooling2d apply only to 4D Tensor\")\n if self.InputShape[2] != self.InputShape[3]:\n raise ValueError(\"The input tensor has to be square images\")\n if self.InputShape[2] % self.stride != 0:\n raise ValueError(\"The input tensor needs padding for this filter size\")\n InputHw = self.InputShape[2]\n output_hw = InputHw // self.stride\n self.OutputShape = [self.InputShape[0], self.InputShape[1], output_hw, output_hw]\n self.HandleShape = self.InputShape\n # self.Shapefortranspose = [int(round(((self.InputShape[0] * self.InputShape[1] * self.InputShape[2] * self.InputShape[3])/262144)+1/2)), 262144, 1, 1]\n self.Shapefortranspose = [\n int(round(((self.InputShape[0] * self.InputShape[1] * self.InputShape[2] * self.InputShape[3])/self.STORE_CHUNK_ELEM)+1/2)), self.STORE_CHUNK_ELEM, 1, 1]\n\n\n def init(self, start_enclave=True):\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n self.PlainFunc = self.PlainFunc(self.filter_hw, self.stride, self.maxpoolpadding)\n TensorLoader.init(self, start_enclave)\n\n if self.startmaxpool is False:\n self.startmaxpool = True\n return self.maxpoolinit(self.LayerName, \"inputtrans\", \"outputtrans\")\n else:\n self.ForwardFunc = self.ForwardFunc(self.filter_hw, stride=self.stride, padding=self.maxpoolpadding)\n self.PlainFunc = self.PlainFunc(self.filter_hw, stride=self.stride, padding=self.maxpoolpadding)\n\n # TensorLoader.init(self, start_enclave)\n # self.ForwardFunc = self.ForwardFunc(self.filter_hw, stride=self.stride, padding=self.maxpoolpadding)\n # self.PlainFunc = self.PlainFunc(self.filter_hw, stride=self.stride, padding=self.maxpoolpadding)\n\n # TensorLoader.init(self, start_enclave)\n\n # def forward(self):\n # with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n # self.forward_tensor_transfer()\n # # self.requires_grad_on_cpu(\"input\")\n # if self.EnclaveMode == ExecutionModeOptions.Enclave:\n # self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\")))\n # st()\n\n # # if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.Enclave:\n # # self.transfer_enclave_to_cpu(\"input\")\n # # if 
torch.sum(self.get_cpu(\"input\").abs()) == 0:\n # # raise RuntimeError(f\"{self.LayerName}: SGX input not load\")\n # # self.transfer_cpu_to_enclave(\"input\")\n # # self.transfer_enclave_to_cpu(\"input\")\n # # self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n # # self.transfer_cpu_to_enclave(\"output\")\n # elif self.EnclaveMode == ExecutionModeOptions.CPU:\n # if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.CPU and torch.sum(self.get_cpu(\"input\").abs()) == 0:\n # raise RuntimeError(f\"{self.LayerName}: SGX input not load\")\n # self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n # elif self.EnclaveMode == ExecutionModeOptions.GPU:\n # if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.GPU and torch.sum(self.get_gpu(\"input\").abs()) == 0:\n # raise RuntimeError(f\"{self.LayerName}: SGX input not load\")\n # self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\")))\n # else:\n # raise RuntimeError\n\n def maxpoolfunc(self, namein, nameout):\n # assume row_stride and col_stride are both None or both not None\n # assume row_pad and col_pad are both None or both not None\n # if self.LayerName == \"Layer3.0.proxies.2.maxpool\":\n # print(self.LayerName, \"Input: \", self.get_cpu(\"input\")[0,0,0,:10])\n output = self.maxpoolnew(self.LayerName, namein, nameout, self.InputShape, self.OutputShape[2], self.OutputShape[3],\n self.filter_hw, self.filter_hw, self.stride, self.stride, self.maxpoolpadding,\n self.maxpoolpadding)\n # if self.LayerName == \"Layer3.0.proxies.2.maxpool\":\n # self.transfer_enclave_to_cpu(\"output\")\n # print(self.LayerName, \"Output: \", self.get_cpu(\"output\")[0,0,0,:])\n # self.transfer_cpu_to_enclave(\"output\")\n return output\n\n def maxpoolbackfunc(self, nameout, namedout, namedin):\n return self.maxpoolback(self.LayerName, namedout, namedin, self.InputShape, self.OutputShape[2], self.OutputShape[3],\n self.filter_hw, self.filter_hw, self.row_stride, self.col_stride, self.maxpoolpadding,\n self.maxpoolpadding)" }, { "identifier": "SecretOutputLayer", "path": "python/layers/output.py", "snippet": "class SecretOutputLayer(SecretNonlinearLayer):\n TargetShape = None\n loss = 0\n\n def __init__(\n self, sid, LayerName, EnclaveMode, inference=False, link_prev=True, link_next=True, \n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.ForwardFunc = torch.nn.CrossEntropyLoss()\n self.PlainFunc = torch.nn.CrossEntropyLoss()\n self.EnclaveMode = ExecutionModeOptions.CPU\n self.inference = inference\n\n\n def init_shape(self):\n self.InputShape = self.PrevLayer.get_output_shape()\n self.OutputShape = [1]\n self.TargetShape = [self.InputShape[0]] # number of Minibatch\n\n def init(self, start_enclave=True):\n TensorLoader.init(self, start_enclave)\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n self.tensor_name_list = {}\n return\n\n NeededTensorNames = [\n (\"output\", self.OutputShape, None),\n (\"DerInput\", self.InputShape, None),\n (\"input\", self.InputShape, None),\n (\"target\", self.TargetShape, None),\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n def load_target(self, tensor):\n self.set_tensor_with_name(\"target\", tensor)\n\n def get_loss(self):\n return self.loss\n \n def get_prediction(self):\n self.forward_tensor_transfer(\"input\")\n if 
torch.sum(self.get_cpu(\"input\").abs()) == 0:\n raise RuntimeError(\"SGX input not load\")\n return self.get_cpu(\"input\")\n\n def forward(self):\n if not self.inference:\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n self.set_cpu(\"input\", self.get_cpu(\"input\").detach())\n self.requires_grad_on_cpu(\"input\")\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\"), self.get_cpu(\"target\")))\n loss = self.get_cpu(\"output\").item()\n self.loss = loss\n\n def backward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Backward\", verbose_level=VerboseLevel.LAYER):\n self.backward_tensor_transfer(transfer_tensor=\"output\")\n self.get_cpu(\"output\").backward()\n self.set_cpu(\"DerInput\", self.get_cpu(\"input\").grad)\n\n def plain_forward(self):\n if not self.inference:\n self.make_sure_cpu_is_latest(\"input\")\n self.set_cpu(\"input\", self.get_cpu(\"input\").detach())\n self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"), self.get_cpu(\"target\"))\n\n def plain_backward(self):\n self.make_sure_cpu_is_latest(\"output\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainBackward\"):\n self.PlainForwardResult.backward()\n self.set_cpu(\"DerInput\", self.get_cpu(\"input\").grad)\n\n def show_plain_error(self):\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"))\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n if self.PlainBackwardResult is None:\n return\n self.make_sure_cpu_is_latest(\"DerInput\")\n\n err = compare_expected_actual(self.PlainBackwardResult, self.get_cpu(\"DerInput\"))\n print(f\"S{self.sid}: {self.LayerName} Backward Error {err}\")\n\n def print_connection_info(self):\n print(f\"{self.LayerName:30} shape{self.InputShape}{' ':30} input {self.PrevLayer.LayerName:30}\")" }, { "identifier": "SecretReLULayer", "path": "python/layers/relu.py", "snippet": "class SecretReLULayer(SecretActivationLayer):\n def __init__(\n self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False, merge_own_tensors=False\n ):\n super().__init__(\n sid, LayerName, EnclaveMode, link_prev, link_next,\n manually_register_prev, manually_register_next, merge_own_tensors\n )\n self.ForwardFuncName = \"ReLU\"\n self.BackwardFuncName = \"DerReLU\"\n self.PlainFunc = torch.nn.ReLU\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.ForwardFunc = self.relufunc\n self.BackwardFunc = self.relubackfunc\n elif self.EnclaveMode is ExecutionModeOptions.CPU:\n self.ForwardFunc = torch.nn.ReLU\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = torch.nn.ReLU\n\n # if self.is_enclave_mode:\n # self.ForwardFunc = self.relufunc\n # self.BackwardFunc = self.relubackfunc\n # self.StoreInEnclave = True\n # else:\n # self.ForwardFunc = torch.nn.ReLU\n # self.StoreInEnclave = False\n\n def init(self, start_enclave=True):\n super().init(start_enclave)\n self.PlainFunc = self.PlainFunc()\n # if not self.is_enclave_mode:\n if self.EnclaveMode is not ExecutionModeOptions.Enclave:\n self.ForwardFunc = self.ForwardFunc()\n\n def relufunc(self, namein, nameout):\n return self.relunew(namein, nameout, self.InputShape)\n\n def relubackfunc(self, nameout, namedout, namedin):\n return 
self.relubackward(nameout, namedout, namedin, self.InputShape)\n\n def show_plain_error_forward(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=False, show_values=False)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")" }, { "identifier": "init_communicate", "path": "python/sgx_net.py", "snippet": "def init_communicate(rank, master_address, master_port, backend='gloo'):\n os.environ['MASTER_ADDR'] = master_address\n os.environ['MASTER_PORT'] = master_port\n dist.init_process_group(backend, rank=rank, world_size=SecretConfig.worldSize)" }, { "identifier": "warming_up_cuda", "path": "python/sgx_net.py", "snippet": "def warming_up_cuda():\n device = torch.device(\"cuda:0\")\n # device = torch.device(\"cpu\")\n\n print(\"Execution device: \", device)\n print(\"PyTorch version: \", torch.__version__)\n print(\"CUDA version: \", torch.version.cuda)\n print(\"CUDA device:\", torch.cuda.get_device_name(0))\n\n batch_size, n_input_channel, n_output_channel, img_hw, filter_hw = 512, 512, 256, 4, 3\n x_shape = [batch_size, n_input_channel, img_hw, img_hw]\n w_shape = [n_output_channel, n_input_channel, filter_hw, filter_hw]\n with NamedTimerInstance(\"Warming up Cuda double\"):\n dummy_a = get_random_uniform(SecretConfig.PrimeLimit, x_shape).type(SecretConfig.dtypeForSave)\n dummy_b = get_random_uniform(SecretConfig.PrimeLimit, w_shape).type(SecretConfig.dtypeForSave)\n F.conv2d(dummy_a.cuda().type(SecretConfig.dtypeForCudaMm), dummy_b.cuda().type(SecretConfig.dtypeForCudaMm),\n padding=1)\n\n with NamedTimerInstance(\"Warming up Cuda dobule 2nd\"):\n F.conv2d(dummy_a.cuda().type(torch.double), dummy_b.cuda().type(torch.double),\n padding=1)\n\n with NamedTimerInstance(\"Warming up Cuda float\"):\n F.conv2d(dummy_a.cuda().type(torch.float), dummy_b.cuda().type(torch.float), padding=1)\n\n with NamedTimerInstance(\"Warming up Cuda float 2nd\"):\n F.conv2d(dummy_a.cuda().type(torch.float), dummy_b.cuda().type(torch.float), padding=1)\n\n batch_size, n_input_channel, n_output_channel, img_hw, filter_hw = 64, 64, 64, 8, 3\n x_shape = [batch_size, n_input_channel, img_hw, img_hw]\n w_shape = [n_output_channel, n_input_channel, filter_hw, filter_hw]\n with NamedTimerInstance(\"Warming up Cpu\"):\n dummy_a = get_random_uniform(SecretConfig.PrimeLimit, x_shape).type(SecretConfig.dtypeForSave)\n dummy_b = get_random_uniform(SecretConfig.PrimeLimit, w_shape).type(SecretConfig.dtypeForSave)\n F.conv2d(dummy_a.type(SecretConfig.dtypeForCpuOp), dummy_b.type(SecretConfig.dtypeForCpuOp),\n padding=1)\n\n with NamedTimerInstance(\"Warming up CppExtension\"):\n GlobalCppExtension.get_conv2d_cudnn()" }, { "identifier": "SecretNeuralNetwork", "path": "python/sgx_net.py", "snippet": "class SecretNeuralNetwork(TensorLoader):\n nn_name = None\n layers = None\n\n def __init__(self, sid, nn_name):\n super().__init__()\n self.sid = sid\n self.init(start_enclave=False)\n self.nn_name = nn_name\n\n def set_layers(self, layers):\n self.layers = layers\n\n if not isinstance(self.layers[0], SecretInputLayer):\n raise ValueError(\"The first layer has to be input layer\")\n if not isinstance(self.layers[-1], SecretOutputLayer):\n raise ValueError(\"The last layer has to be output layer\")\n \n for i in range(len(self.layers) - 1):\n PrevLayer = self.layers[i]\n NextLayer = self.layers[i + 1]\n if not PrevLayer.manually_register_next:\n PrevLayer.register_next_layer(NextLayer)\n if not 
NextLayer.manually_register_prev:\n NextLayer.register_prev_layer(PrevLayer)\n\n \n for layer in self.layers:\n # print(f\"Init_shape/link layer {layer.LayerName}\")\n layer.set_eid(self.get_eid())\n layer.init_shape()\n # if layer.LayerName in [\"Layer1.0.weighted_add\", \"Layer1.0.proxies.0.bn\"]:\n # st()\n layer.link_tensors()\n # print(layer.LayerName)\n # layer.print_tensor_link_relation()\n # if layer.LayerName in [\"Layer1.0.weighted_add\", \"Layer1.0.proxies.0.bn\"]:\n # st()\n \n for idx, layer in enumerate(self.layers):\n # print(f\"Init layer {layer.LayerName}\")\n # if layer.LayerName == \"Layer1.0.main.relu2\":\n # st()\n layer.init(start_enclave=False)\n # if idx > 3:\n # print(layer.LayerName, self.layers[4].get_cpu(\"input\").shape, self.layers[4].PrevLayer.LayerName)\n\n def execute_for_each_layer(self, func, reverse=False):\n layers = self.layers[::-1] if reverse else self.layers\n for layer in layers:\n # print(f\"SID: {self.sid} {layer.LayerName}, {func}\")\n if self.sid == 2 and layer.IsDummyForS2:\n continue\n # print(\"Processing \", layer.LayerName)\n func(layer)\n \n # st()\n\n def classifier_output(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.nn_name} classifier_output\"):\n self.forward()\n if self.sid == 2:\n return\n # layers: input_layer, ..., fc_layer, output_layer\n last_fc = self.layers[-2]\n last_fc.transfer_enclave_to_cpu(\"output\")\n outputs = last_fc.get_cpu(\"output\")\n _, predicted = torch.max(outputs.data, 1)\n return predicted\n\n def get_loss(self):\n return self.layers[-1].get_loss()\n\n def forward_with_time(self):\n def run_forward(layer):\n layer.forward()\n t0 = time()\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} Forward\"):\n self.execute_for_each_layer(run_forward)\n t1 = time()\n # time in ms\n elapse_time = (t1 - t0) * (10 ** 3) \n return elapse_time\n\n def forward(self):\n def run_forward(layer):\n layer.forward()\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} Forward\"):\n self.execute_for_each_layer(run_forward)\n\n def backward(self):\n def run_backward(layer):\n layer.backward()\n with NamedTimerInstance(f\"S{self.sid}: {self.nn_name} Backward\"):\n self.execute_for_each_layer(run_backward, reverse=True)\n\n def plain_forward(self):\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} PlainForward\"):\n self.execute_for_each_layer(lambda x: x.plain_forward())\n\n def plain_backward(self):\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} PlainBackward\"):\n self.execute_for_each_layer(lambda x: x.plain_backward(), reverse=True)\n\n def show_plain_error(self):\n self.execute_for_each_layer(lambda x: x.show_plain_error())" }, { "identifier": "SgdOptimizer", "path": "python/sgx_net.py", "snippet": "class SgdOptimizer(TensorLoader):\n def __init__(self, sid):\n super().__init__()\n self.sid = sid\n self.learning_rate = 0.05\n self.momentum = 0.9\n self.weight_decay = 5e-4\n self.momentum_init_flags = defaultdict(lambda: False)\n self.ideal_momentum_buf = {}\n\n self.lr_gamma = 0.5\n self.lr_step = 30\n self.step_counter = 0\n\n self.layers = None\n\n def set_layers(self, layers):\n self.layers = layers\n\n def generate_tensor_name_list(self, force=False):\n # Run if forced or self.tensor_name_list is not generated\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n return\n\n self.tensor_name_list = []\n for layer in self.layers:\n for (DerName, ParamName, shape) in layer.LearnableParamsList:\n self.tensor_name_list.append((ParamName + 
\"Momentum\", shape, None))\n\n def update_params(self, test_with_ideal=False):\n if self.sid == 2:\n return\n for layer in self.layers:\n self.update_params_in_layer(layer, test_with_ideal=test_with_ideal)\n\n def update_params_in_layer(self, layer, test_with_ideal=False):\n # ref: https://github.com/pytorch/pytorch/blob/master/torch/optim/sgd.py\n if layer.LearnableParamsList is None:\n return\n\n task_ids = []\n for (der_name, param_name, shape) in layer.LearnableParamsList:\n momentum_name = param_name + \"Momentum\"\n global_momentum_name = layer.name_modifier(momentum_name)\n\n if layer.StoreInEnclave:\n if test_with_ideal:\n ideal_p, ideal_momentum = self.ideal_update_params_with_name(layer, der_name, param_name, shape)\n first_momentum = not self.momentum_init_flags[global_momentum_name]\n if first_momentum:\n # print(\"FIRST MOMENTUM\")\n self.momentum_init_flags[global_momentum_name] = True\n layer.init_enclave_tensor(momentum_name, shape)\n task_id = layer.sgd_update(param_name=param_name, grad_name=der_name, momentum_name=momentum_name,\n lr=self.learning_rate, momentum=self.momentum,\n weight_decay=self.weight_decay,\n first_momentum=first_momentum, is_async=True)\n if test_with_ideal:\n while not self.get_task_status(task_id):\n pass\n layer.generate_cpu_tensor(momentum_name, shape)\n layer.transfer_enclave_to_cpu(momentum_name)\n layer.transfer_enclave_to_cpu(param_name)\n param_err = compare_expected_actual(ideal_p, layer.get_cpu(param_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Param Error: {param_err}\")\n momentum_err = compare_expected_actual(ideal_momentum, layer.get_cpu(momentum_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Momentum Error: {momentum_err}\")\n else:\n task_ids.append(task_id)\n else:\n DerCpu = layer.get_cpu(der_name)\n ParamsCpu = layer.get_cpu(param_name)\n\n if test_with_ideal:\n ideal_p, ideal_momentum = self.ideal_update_params_with_name(layer, der_name, param_name, shape)\n\n DerCpu.add_(self.weight_decay, ParamsCpu)\n\n if not self.momentum_init_flags[global_momentum_name]:\n self.momentum_init_flags[global_momentum_name] = True\n layer.generate_cpu_tensor(momentum_name, shape)\n layer.get_cpu(momentum_name).copy_(DerCpu)\n MomentumCpu = layer.get_cpu(momentum_name)\n else:\n MomentumCpu = layer.get_cpu(momentum_name)\n MomentumCpu.mul_(self.momentum).add_(1, DerCpu)\n\n ParamsCpu.add_(-self.learning_rate, MomentumCpu)\n\n if test_with_ideal:\n param_err = compare_expected_actual(ideal_p, layer.get_cpu(param_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Param Error: {param_err}\")\n momentum_err = compare_expected_actual(ideal_momentum, layer.get_cpu(momentum_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Momentum Error: {momentum_err}\")\n\n # Wait for all tasks to be finished\n for task_id in task_ids:\n while not self.get_task_status(task_id):\n pass\n\n def ideal_update_params_with_name(self, layer, der_name, param_name, shape):\n weight_decay = self.weight_decay\n momentum = self.momentum\n dampening = 0\n nesterov = False\n lr = self.learning_rate\n\n global_momentum_name = layer.name_modifier(param_name + 'Momentum')\n\n if layer.StoreInEnclave:\n layer.transfer_enclave_to_cpu(der_name)\n layer.transfer_enclave_to_cpu(param_name)\n d_p = torch.clone(layer.get_cpu(der_name)).detach()\n p = torch.clone(layer.get_cpu(param_name)).detach()\n\n if weight_decay != 0:\n d_p.add_(weight_decay, p)\n if global_momentum_name not in self.ideal_momentum_buf:\n buf 
= self.ideal_momentum_buf[global_momentum_name] = torch.clone(d_p).detach()\n else:\n buf = self.ideal_momentum_buf[global_momentum_name]\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n p.add_(-lr, d_p)\n\n return p, buf" }, { "identifier": "SGXLinearBase", "path": "python/layers/sgx_linear_base.py", "snippet": "class SGXLinearBase(SecretLayerBase):\n batch_size = None\n InputShape = None\n WeightShape = None\n OutputShape = None\n\n def __init__(\n self, sid, LayerName, EnclaveMode, batch_size, n_output_features, \n n_input_features=None, is_enclave_mode=False, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n\n self.ForwardFuncName = \"SGXLinear\"\n self.BackwardFuncName = \"DerSGXLinear\"\n self.PlainFunc = torch.nn.Linear\n self.is_enclave_mode = is_enclave_mode\n self.n_output_features = n_output_features\n self.n_input_features = n_input_features\n self.batch_size = batch_size\n\n if EnclaveMode is ExecutionModeOptions.CPU or EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = torch.nn.Linear\n # if self.is_enclave_mode:\n # self.StoreInEnclave = True\n # else:\n # self.ForwardFunc = torch.nn.Linear\n # self.StoreInEnclave = False\n\n def init_shape(self):\n self.WeightShape = self.DerWeightShape = [self.n_output_features, self.n_input_features]\n self.BiasShape = self.DerBiasShape = [self.n_output_features]\n if self.n_input_features is None:\n self.InputShape = self.PrevLayer.get_output_shape()\n else:\n self.InputShape = self.DerInputShape = [self.batch_size, self.n_input_features]\n self.OutputShape = self.DerOutputShape = [self.batch_size, self.n_output_features]\n self.LearnableParamsList = [\n LearnableParamTuple(dw_name=\"DerWeight\", w_name=\"weight\", shape=self.WeightShape),\n LearnableParamTuple(dw_name=\"DerBias\", w_name=\"bias\", shape=self.WeightShape),\n ]\n\n def init(self, start_enclave=True):\n TensorLoader.init(self, start_enclave)\n \n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.PlainFunc = self.PlainFunc(self.n_input_features, self.n_output_features)\n self.get_cpu(\"weight\").data.copy_(self.PlainFunc.weight.data)\n self.get_cpu(\"bias\").data.copy_(self.PlainFunc.bias.data)\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.sgx_linear_init(\n self.LayerName,\n \"input\", \"output\", \"weight\", \"bias\",\n # \"DerInput\", \"DerOutput\", \"DerWeight\", \"DerBias\",\n self.batch_size, self.n_input_features, self.n_output_features)\n else:\n self.ForwardFunc = self.ForwardFunc(self.n_input_features, self.n_output_features)\n self.PlainFunc = self.PlainFunc(self.n_input_features, self.n_output_features)\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n if self.EnclaveMode is ExecutionModeOptions.CPU:\n self.set_cpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_cpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.set_gpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_gpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n self.ForwardFunc.cuda()\n # print(\"======== SGX linear init finish\")\n\n def link_tensors(self):\n super().link_tensors()\n\n def init_params(self):\n cpu_w = 
torch.zeros(self.w_shape)\n torch.nn.init.xavier_normal_(cpu_w, 1)\n self.set_tensor_cpu_enclave(\"weight\", cpu_w)\n cpu_b = torch.zeros(self.b_shape)\n torch.nn.init.constant_(cpu_b, 0)\n self.set_tensor_cpu_enclave(\"bias\", cpu_b)\n\n def get_output_shape(self):\n return self.OutputShape\n\n def inject_params(self, params):\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n cpu_w = self.get_cpu(\"weight\")\n cpu_w.copy_(params.weight.data)\n self.transfer_cpu_to_enclave(\"weight\")\n cpu_b = self.get_cpu(\"bias\")\n cpu_b.copy_(params.bias.data)\n self.transfer_cpu_to_enclave(\"bias\")\n elif self.EnclaveMode is ExecutionModeOptions.CPU:\n cpu_w = self.get_cpu(\"weight\")\n cpu_w.copy_(params.weight.data)\n cpu_b = self.get_cpu(\"bias\")\n cpu_b.copy_(params.bias.data)\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n cpu_w = self.get_gpu(\"weight\")\n cpu_w.copy_(params.weight.data)\n cpu_b = self.get_gpu(\"bias\")\n cpu_b.copy_(params.bias.data)\n\n def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:\n self.make_sure_cpu_is_latest(\"weight\")\n plain_layer.weight.data.copy_(self.get_cpu(\"weight\"))\n self.make_sure_cpu_is_latest(\"bias\")\n plain_layer.bias.data.copy_(self.get_cpu(\"bias\"))\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n NeededTensorNames = [(\"output\", self.OutputShape, None),\n # (\"DerInput\", self.InputShape, None),\n (\"input\", self.InputShape, None),\n # (\"DerOutput\", self.OutputShape, None),\n (\"weight\", self.WeightShape, None),\n (\"bias\", self.BiasShape, None),\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n def forward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.forward_tensor_transfer()\n self.sgx_linear_forward(self.LayerName)\n elif self.EnclaveMode == ExecutionModeOptions.CPU:\n self.forward_tensor_transfer()\n self.requires_grad_on_cpu(\"input\")\n self.ForwardFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.ForwardFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n elif self.EnclaveMode == ExecutionModeOptions.GPU:\n self.forward_tensor_transfer()\n self.ForwardFunc.weight.data.copy_(self.get_gpu(\"weight\"))\n self.ForwardFunc.bias.data.copy_(self.get_gpu(\"bias\"))\n self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\").type(SecretConfig.dtypeForCpuOp)))\n\n def plain_forward(self, NeedBackward=False):\n if self.is_enclave_mode:\n self.make_sure_cpu_is_latest(\"input\")\n self.make_sure_cpu_is_latest(\"weight\")\n self.make_sure_cpu_is_latest(\"bias\")\n # self.requires_grad_on_cpu(\"input\")\n self.PlainFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.PlainFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n else:\n self.make_sure_cpu_is_latest(\"input\")\n self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n # torch.set_num_threads(1)\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"))\n # torch.set_num_threads(4)\n\n def show_plain_error_forward(self):\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=True)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")" }, { "identifier": "SGXConvBase", "path": "python/layers/sgx_conv_base.py", "snippet": "class 
SGXConvBase(SecretLayerBase):\n batch_size = None\n pytorch_x_shape, sgx_x_shape = None, None\n pytorch_w_shape, sgx_w_shape = None, None\n bias_shape = None\n pytorch_y_shape, sgx_y_shape = None, None\n\n def __init__(\n self, sid, LayerName, EnclaveMode,\n n_output_channel, filter_hw, stride, padding, batch_size=None, n_input_channel=None,\n img_hw=None, bias=True,\n is_enclave_mode=False, link_prev=True, link_next=True, manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n\n self.ForwardFuncName = \"SGXConv\"\n self.BackwardFuncName = \"DerSGXConv\"\n self.PlainFunc = torch.nn.Conv2d\n self.is_enclave_mode = is_enclave_mode\n self.batch_size = batch_size\n self.n_input_channel = n_input_channel\n self.n_output_channel = n_output_channel\n self.img_hw = img_hw\n self.filter_hw = filter_hw\n self.padding = padding\n self.stride = stride\n self.bias = bias\n\n if EnclaveMode is ExecutionModeOptions.CPU or EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = torch.nn.Conv2d\n\n # --------------\n # Add BIAS!!!!!\n # --------------\n\n def init_shape(self):\n if self.batch_size is None and self.PrevLayer is not None:\n self.pytorch_x_shape = self.PrevLayer.get_output_shape()\n self.batch_size, self.n_input_channel, self.img_hw, _ = self.pytorch_x_shape\n else:\n self.pytorch_x_shape = [self.batch_size, self.n_input_channel, self.img_hw, self.img_hw]\n # print(self.LayerName)\n # st()\n # BHWC\n self.sgx_x_shape = [self.pytorch_x_shape[0], self.pytorch_x_shape[2], self.pytorch_x_shape[3], self.pytorch_x_shape[1]]\n # pytorch weight is out * in * h * w\n self.pytorch_w_shape = [self.n_output_channel, self.n_input_channel, self.filter_hw, self.filter_hw]\n # w shape is in * w * h * out, the transpose of out * h * w * in\n self.sgx_w_shape = [self.n_output_channel, self.filter_hw, self.filter_hw, self.n_input_channel]\n # BCHW\n self.pytorch_y_shape = calc_conv2d_output_shape_stride(self.pytorch_x_shape, self.pytorch_w_shape, self.padding, self.stride)\n # BHWC\n self.sgx_y_shape = [self.pytorch_y_shape[0], self.pytorch_y_shape[2], self.pytorch_y_shape[3], self.pytorch_y_shape[1]]\n self.bias_shape = [self.n_output_channel]\n\n # print(\n # f\"Init_shape pytorch_input {self.pytorch_x_shape}, sgx_input {self.sgx_x_shape}, \"\n # f\"pytorch_output {self.pytorch_y_shape}, sgx_output {self.sgx_y_shape}, \"\n # f\"pytorch_weight {self.pytorch_w_shape}, sgx_weight {self.sgx_w_shape}, \"\n # f\"bias {self.bias_shape}\"\n # )\n\n self.LearnableParamsList = [\n LearnableParamTuple(dw_name=\"DerWeight\", w_name=\"weight\", shape=self.sgx_w_shape),\n LearnableParamTuple(dw_name=\"DerBias\", w_name=\"bias\", shape=self.bias_shape),\n ]\n\n def init(self, start_enclave=True):\n # print(f\"Weight shape {self.sgx_w_shape}\")\n TensorLoader.init(self, start_enclave)\n \n \n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.PlainFunc = self.PlainFunc(\n self.n_input_channel, self.n_output_channel, self.filter_hw,\n self.stride, self.padding, bias=self.bias)\n weight_pytorch_form = self.PlainFunc.weight.data\n weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n self.get_cpu(\"weight\").data.copy_(weight_tf_form)\n self.transfer_cpu_to_enclave(\"weight\")\n # Bias\n if self.bias:\n bias_data = self.PlainFunc.bias.data\n else:\n bias_data = torch.zeros(self.bias_shape)\n self.get_cpu(\"bias\").data.copy_(bias_data)\n self.transfer_cpu_to_enclave(\"bias\")\n 
self.sgx_conv_init(\n self.LayerName,\n \"sgx_input\", \"sgx_output\", \"weight\", \"bias\",\n # \"sgx_DerInput\", \"sgx_DerOutput\", \"DerWeight\", \"DerBias\",\n # \"input\", \"output\", \"weight\", \n # \"DerInput\", \"DerOutput\", \"DerWeight\", \n self.batch_size, self.img_hw, self.img_hw, self.n_input_channel, \n self.pytorch_y_shape[2], self.pytorch_y_shape[3], self.n_output_channel, \n self.filter_hw, self.padding, self.stride)\n elif self.EnclaveMode in[ ExecutionModeOptions.CPU, ExecutionModeOptions.GPU]:\n self.ForwardFunc = self.ForwardFunc(\n self.n_input_channel, self.n_output_channel, self.filter_hw,\n self.stride, self.padding, bias=self.bias)\n self.PlainFunc = self.PlainFunc(\n self.n_input_channel, self.n_output_channel, self.filter_hw,\n self.stride, self.padding, bias=self.bias)\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n weight_pytorch_form = list(self.ForwardFunc.parameters())[0].data\n weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n if self.EnclaveMode is ExecutionModeOptions.CPU:\n self.set_cpu(\"weight\", weight_tf_form)\n if self.bias:\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n bias_data = self.PlainFunc.bias.data\n self.set_cpu(\"bias\", bias_data)\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.set_gpu(\"weight\", weight_tf_form)\n if self.bias:\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n bias_data = self.PlainFunc.bias.data\n self.set_gpu(\"bias\", bias_data)\n self.ForwardFunc.cuda()\n\n\n def link_tensors(self):\n super().link_tensors()\n\n def init_params(self):\n cpu_w = torch.zeros(self.sgx_w_shape)\n torch.nn.init.xavier_normal_(cpu_w, 1)\n self.set_tensor_cpu_gpu_enclave(\"weight\", cpu_w)\n\n def get_output_shape(self):\n return self.pytorch_y_shape\n \n def weight_pytorch2tf(self, weight_pytorch_form):\n # weight_pytorch_form is out * in * h * w\n # out * (h * w) * in, \n # h and w dont transpose\n # weight_tf_form = weight_pytorch_form.permute(1,3,2,0).contiguous()\n weight_tf_form = weight_pytorch_form.permute(0,2,3,1).contiguous()\n return weight_tf_form\n\n def weight_tf2pytorch(self, weight_tf_form):\n # weight_tf_form is out * (h * w) * in, the transpose of out * (h * w) * in\n # out * in * h * w\n # h and w dont transpose\n # weight_pytorch_form = weight_tf_form.permute(3, 0, 2, 1).contiguous()\n weight_pytorch_form = weight_tf_form.permute(0,3,1,2).contiguous()\n return weight_pytorch_form\n\n def feature_pytorch2tf(self, tensor_pytorch_form):\n # tensor_pytorch_form is b * in * h * w\n # b * h * w * in\n tensor_tf_form = tensor_pytorch_form.permute(0, 2, 3, 1).contiguous()\n return tensor_tf_form\n \n def feature_tf2pytorch(self, tensor_tf_form):\n # tensor_tf_form is b * h * w * in\n # b * in * h * w\n tensor_pytorch_form = tensor_tf_form.permute(0, 3, 1, 2).contiguous()\n return tensor_pytorch_form\n\n def inject_params(self, params):\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n cpu_w = self.get_cpu(\"weight\")\n weight_pytorch_form = params.weight.data\n weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n cpu_w.copy_(weight_tf_form)\n self.transfer_cpu_to_enclave(\"weight\")\n\n # bias\n assert (\n (self.bias and params.bias is not None) or\n (not self.bias and params.bias is None)\n )\n if self.bias:\n bias_data = params.bias.data\n else:\n bias_data = torch.zeros(self.n_output_channel)\n cpu_b = self.get_cpu(\"bias\")\n cpu_b.copy_(bias_data)\n self.transfer_cpu_to_enclave(\"bias\")\n elif self.EnclaveMode is 
ExecutionModeOptions.CPU:\n weight_pytorch_form = params.weight.data\n weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n self.get_cpu(\"weight\").copy_(weight_tf_form)\n # bias\n assert (\n (self.bias and params.bias is not None) or\n (not self.bias and params.bias is None)\n )\n if self.bias:\n self.get_cpu(\"bias\").copy_(params.bias.data)\n\n # Move weight to ForwardFunc\n weight_tf_form = self.get_cpu(\"weight\")\n weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n self.ForwardFunc.weight.data.copy_(weight_pytorch_form)\n\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n weight_pytorch_form = params.weight.data\n weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n self.get_gpu(\"weight\").copy_(weight_tf_form)\n # bias\n assert (\n (self.bias and params.bias is not None) or\n (not self.bias and params.bias is None)\n )\n if self.bias:\n self.get_gpu(\"bias\").copy_(params.bias.data)\n\n # Move weight to ForwardFunc\n weight_tf_form = self.get_gpu(\"weight\")\n weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n self.ForwardFunc.weight.data.copy_(weight_pytorch_form)\n\n\n def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:\n self.make_sure_cpu_is_latest(\"weight\")\n weight_tf_form = self.get_cpu(\"weight\")\n weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n plain_layer.weight.data.copy_(weight_pytorch_form)\n\n assert (\n (self.bias and plain_layer.bias is not None) or\n (not self.bias and plain_layer.bias is None)\n )\n if self.bias:\n self.make_sure_cpu_is_latest(\"bias\")\n bias_data = self.get_cpu(\"bias\")\n plain_layer.weight.data.copy_(bias_data)\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n NeededTensorNames = [(\"output\", self.pytorch_y_shape, None), (\"sgx_output\", self.sgx_y_shape, None),\n (\"DerInput\", self.pytorch_x_shape, None), (\"sgx_DerInput\", self.sgx_x_shape, None),\n (\"input\", self.pytorch_x_shape, None), (\"sgx_input\", self.sgx_x_shape, None),\n (\"DerOutput\", self.pytorch_y_shape, None), (\"sgx_DerOutput\", self.sgx_y_shape, None),\n (\"weight\", self.sgx_w_shape, None),\n (\"bias\", self.bias_shape, None),\n ]\n self.tensor_name_list = NeededTensorNames\n\n\n def forward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer(\"input\")\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n \n # \"input\" is pytorch form\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n if self.PrevLayer.EnclaveMode is ExecutionModeOptions.Enclave:\n self.transfer_enclave_to_cpu(\"input\")\n input_pytorch_form = self.get_cpu(\"input\")\n \n if torch.sum(self.get_cpu(\"input\").abs()) == 0:\n print(self.LayerName)\n raise RuntimeError(\"SGX input not load\")\n input_tf_form = self.feature_pytorch2tf(input_pytorch_form)\n self.set_cpu(\"sgx_input\", input_tf_form)\n self.transfer_cpu_to_enclave(\"sgx_input\")\n # self.forward_tensor_transfer(\"sgx_input\")\n # print(self.get_cpu(\"sgx_input\").squeeze())\n \n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} sgx_conv_forward\", verbose_level=VerboseLevel.LAYER):\n # if self.LayerName == \"Layer2.0.downsample.conv\":\n # st()\n self.sgx_conv_forward(self.LayerName)\n \n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Output Postprocess\", verbose_level=VerboseLevel.LAYER):\n 
self.make_sure_cpu_is_latest(\"sgx_output\")\n output_tf_form = self.get_cpu(\"sgx_output\")\n output_pytorch_form = self.feature_tf2pytorch(output_tf_form)\n self.set_cpu(\"output\", output_pytorch_form)\n self.transfer_cpu_to_enclave(\"output\")\n elif self.EnclaveMode == ExecutionModeOptions.CPU:\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n # self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Weight Transfer\", verbose_level=VerboseLevel.LAYER):\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} get weight_tf_form\", verbose_level=VerboseLevel.LAYER):\n weight_tf_form = self.get_cpu(\"weight\")\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} weight_tf2pytorch\", verbose_level=VerboseLevel.LAYER):\n weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} copy data\", verbose_level=VerboseLevel.LAYER):\n self.ForwardFunc.weight.data.copy_(weight_pytorch_form)\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} GPU conv forward\", verbose_level=VerboseLevel.LAYER):\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n elif self.EnclaveMode == ExecutionModeOptions.GPU:\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n # self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Weight Transfer\", verbose_level=VerboseLevel.LAYER):\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} get weight_tf_form\", verbose_level=VerboseLevel.LAYER):\n weight_tf_form = self.get_gpu(\"weight\")\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} weight_tf2pytorch\", verbose_level=VerboseLevel.LAYER):\n weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} copy data\", verbose_level=VerboseLevel.LAYER):\n self.ForwardFunc.weight.data.copy_(weight_pytorch_form)\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} GPU conv forward\", verbose_level=VerboseLevel.LAYER):\n self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\").type(SecretConfig.dtypeForCpuOp)))\n\n\n def plain_forward(self, NeedBackward=False):\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n self.make_sure_cpu_is_latest(\"input\")\n self.make_sure_cpu_is_latest(\"weight\")\n if self.bias:\n self.make_sure_cpu_is_latest(\"bias\")\n # self.requires_grad_on_cpu(\"input\")\n weight_tf_form = self.get_cpu(\"weight\")\n weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n self.PlainFunc.weight.data.copy_(weight_pytorch_form)\n if self.bias:\n bias_data = self.get_cpu(\"bias\")\n self.PlainFunc.bias.data.copy_(bias_data)\n elif self.EnclaveMode in [ExecutionModeOptions.CPU, ExecutionModeOptions.GPU]:\n self.make_sure_cpu_is_latest(\"input\")\n self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n # torch.set_num_threads(1)\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"))\n # torch.set_num_threads(4)\n\n def show_plain_error_forward(self):\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=True)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n def print_connection_info(self):\n 
print(f\"{self.LayerName:20} shape{self.pytorch_x_shape}{' ':20} mode{self.EnclaveMode}{' ':20} input {self.PrevLayer.LayerName:20} output {self.NextLayer.LayerName:20}\")" }, { "identifier": "ExecutionModeOptions", "path": "python/utils/basic_utils.py", "snippet": "class ExecutionModeOptions(Enum):\n Enclave = 1\n CPU = 2\n GPU = 3" }, { "identifier": "Logger", "path": "python/utils/logger_utils.py", "snippet": "class Logger(object):\n logfile_path = \"logfile.log\"\n\n def __init__(self):\n self.terminal = sys.stdout\n self.log = open(self.logfile_path, \"a\")\n\n def reset_logfile(self, path):\n self.logfile_path = path\n self.log = open(self.logfile_path, \"a\")\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n #this flush method is needed for python 3 compatibility.\n #this handles the flush command by doing nothing.\n #you might want to specify some extra behavior here.\n # pass\n self.terminal.flush()\n self.log.flush()" }, { "identifier": "NamedTimerInstance", "path": "python/utils/timer_utils.py", "snippet": "class NamedTimerInstance(object):\n def __init__(self, name, verbose_level=VerboseLevel.EVERY):\n self.name = name\n self.verbose_level = verbose_level\n\n def __enter__(self):\n return NamedTimer.start(self.name, verbose_level=self.verbose_level)\n ...\n\n def __exit__(self, *args):\n NamedTimer.end(self.name)\n ..." }, { "identifier": "VerboseLevel", "path": "python/utils/timer_utils.py", "snippet": "class VerboseLevel(IntEnum):\n EVERY = 1\n LAYER = 2\n RUN = 3\n EPOCH = 4" }, { "identifier": "NamedTimer", "path": "python/utils/timer_utils.py", "snippet": "class NamedTimer(object):\n __instance = None\n\n @staticmethod\n def get_instance():\n if NamedTimer.__instance is None:\n NamedTimer()\n return NamedTimer.__instance\n\n def __init__(self):\n NamedTimer.__instance = self\n self.timers = {}\n self.verbose_level = VerboseLevel.EVERY\n\n @staticmethod\n def start_timer(name, **kwargs):\n NamedTimer.get_instance().timers[name] = Timer(name, **kwargs)\n return NamedTimer.get_instance().timers[name]\n\n @staticmethod\n def start(name, **kwargs):\n return NamedTimer.get_instance().start_timer(name, **kwargs)\n\n @staticmethod\n def end_timer(name, **kwargs):\n NamedTimer.get_instance().timers[name].end(**kwargs)\n\n @staticmethod\n def end(name, tmp_name=None):\n # print(NamedTimer.get_instance().timers[name].verbose_level, NamedTimer.get_instance().verbose_level)\n NamedTimer.get_instance().end_timer(name, tmp_name=tmp_name)\n\n @staticmethod\n def set_verbose_level(verbose_level):\n if not isinstance(verbose_level, VerboseLevel):\n raise ValueError(\"Please set an enum from VerboseLevel\")\n NamedTimer.get_instance().verbose_level = verbose_level" }, { "identifier": "compare_expected_actual", "path": "python/utils/torch_utils.py", "snippet": "def compare_expected_actual(expected, actual, show_where_err=False, get_relative=False, verbose=False, show_values=False):\n def purify(x):\n # return torch.tensor(x)\n res = x\n # if not (isinstance(x, torch.Tensor) or isinstance(x, torch.Variable)):\n if not (isinstance(x, torch.Tensor) ):\n res = torch.tensor(x)\n # return x.detach().numpy()\n return res.type(torch.float).to(\"cpu\")\n expected = purify(expected)\n actual = purify(actual)\n\n if show_values:\n print(\"expected:\", expected[0, 0])\n print(\"actual:\", actual[0, 0])\n\n avg_abs_diff = torch.mean(torch.abs(expected - actual)).item()\n res = avg_abs_diff\n\n if show_where_err:\n show_indices = 
torch.abs(expected - actual) / torch.abs(expected) > 0.5\n # show_indices = (expected != actual)\n print(\"error indices: \", np.where(show_indices.cpu()))\n print(\"expected values:\", expected[show_indices])\n print(\"difference:\", (expected - actual)[show_indices])\n\n if get_relative:\n tmp_expected, tmp_actual = expected[expected != 0], actual[expected != 0]\n relative_diff = torch.abs(tmp_expected - tmp_actual) / torch.abs(tmp_expected)\n relative_avg_diff = torch.mean(torch.abs(tmp_actual - tmp_expected)) / torch.mean(torch.abs(tmp_expected))\n Error = namedtuple(\"Error\", (\"AvgAbsDiff\", \"RelAvgDiff\", \"AvgRelDiff\", \"StdRelDiff\"))\n res = Error(avg_abs_diff, relative_avg_diff.item(), torch.mean(relative_diff).item(), torch.std(relative_diff).item())\n\n if verbose:\n print(res)\n\n return res" } ]
import os import sys import numpy as np import torch import torch.distributed as dist import sys import pdb from pdb import set_trace as st from torch import optim, nn from python.common_net import register_layer, register_weight_layer, get_layer_weight, get_layer_input, \ get_layer_weight_grad, get_layer_output, get_layer_output_grad, get_layer_input_grad from python.enclave_interfaces import GlobalTensor from python.layers.batch_norm_2d import SecretBatchNorm2dLayer from python.layers.flatten import SecretFlattenLayer from python.layers.input import SecretInputLayer from python.layers.maxpool2d import SecretMaxpool2dLayer from python.layers.output import SecretOutputLayer from python.layers.relu import SecretReLULayer from python.sgx_net import init_communicate, warming_up_cuda, SecretNeuralNetwork, SgdOptimizer from python.layers.sgx_linear_base import SGXLinearBase from python.layers.sgx_conv_base import SGXConvBase from python.utils.basic_utils import ExecutionModeOptions from python.utils.logger_utils import Logger from python.quantize_net import NetQ from python.test_sgx_net import argparser_distributed, marshal_process, load_cifar10, seed_torch from python.utils.timer_utils import NamedTimerInstance, VerboseLevel, NamedTimer from python.utils.torch_utils import compare_expected_actual from pdb import set_trace as st
21,343
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def compare_layer_member(layer: SGXLinearBase, layer_name: str, extract_func , member_name: str, save_path=None) -> None: print(member_name) layer.make_sure_cpu_is_latest(member_name) compare_expected_actual(extract_func(layer_name), layer.get_cpu(member_name), get_relative=True, verbose=True) if save_path is not None: if not os.path.exists(save_path): os.makedirs(save_path) print("Directory ", save_path, " Created ") else: print("Directory ", save_path, " already exists") torch.save(extract_func(layer_name), os.path.join(save_path, member_name + "_expected")) torch.save(layer.get_cpu(member_name), os.path.join(save_path, member_name + "_actual")) def compare_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: print("comparing with layer in expected NN :", layer_name) compare_name_function = [("input", get_layer_input), ("output", get_layer_output), ("DerOutput", get_layer_output_grad), ] if layer_name != "conv1": compare_name_function.append(("DerInput", get_layer_input_grad)) for member_name, extract_func in compare_name_function: compare_layer_member(layer, layer_name, extract_func, member_name, save_path=save_path) def compare_weight_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: compare_layer(layer, layer_name, save_path) compare_name_function = [("weight", get_layer_weight), ("DerWeight", get_layer_weight_grad) ] for member_name, extract_func in compare_name_function: compare_layer_member(layer, layer_name, extract_func, member_name, save_path=save_path) def test_RELU(sid=0, master_addr=0, master_port=0, is_compare=False): batch_size = 1 n_img_channel = 2 img_hw = 2 x_shape = [batch_size, n_img_channel, img_hw, img_hw] GlobalTensor.init() input_layer = SecretInputLayer(sid, "InputLayer", x_shape, ExecutionModeOptions.Enclave) input = torch.rand(x_shape) - 0.5 print("Python input:") print(input) # input.zero_() # input += 1
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def compare_layer_member(layer: SGXLinearBase, layer_name: str, extract_func , member_name: str, save_path=None) -> None: print(member_name) layer.make_sure_cpu_is_latest(member_name) compare_expected_actual(extract_func(layer_name), layer.get_cpu(member_name), get_relative=True, verbose=True) if save_path is not None: if not os.path.exists(save_path): os.makedirs(save_path) print("Directory ", save_path, " Created ") else: print("Directory ", save_path, " already exists") torch.save(extract_func(layer_name), os.path.join(save_path, member_name + "_expected")) torch.save(layer.get_cpu(member_name), os.path.join(save_path, member_name + "_actual")) def compare_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: print("comparing with layer in expected NN :", layer_name) compare_name_function = [("input", get_layer_input), ("output", get_layer_output), ("DerOutput", get_layer_output_grad), ] if layer_name != "conv1": compare_name_function.append(("DerInput", get_layer_input_grad)) for member_name, extract_func in compare_name_function: compare_layer_member(layer, layer_name, extract_func, member_name, save_path=save_path) def compare_weight_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: compare_layer(layer, layer_name, save_path) compare_name_function = [("weight", get_layer_weight), ("DerWeight", get_layer_weight_grad) ] for member_name, extract_func in compare_name_function: compare_layer_member(layer, layer_name, extract_func, member_name, save_path=save_path) def test_RELU(sid=0, master_addr=0, master_port=0, is_compare=False): batch_size = 1 n_img_channel = 2 img_hw = 2 x_shape = [batch_size, n_img_channel, img_hw, img_hw] GlobalTensor.init() input_layer = SecretInputLayer(sid, "InputLayer", x_shape, ExecutionModeOptions.Enclave) input = torch.rand(x_shape) - 0.5 print("Python input:") print(input) # input.zero_() # input += 1
test_layer = SecretReLULayer(sid, f"TestReLu", ExecutionModeOptions.Enclave, merge_own_tensors=True)
14
2023-11-01 10:37:37+00:00
24k
Codra-Ingenierie-Informatique/DataLab
cdl/core/gui/panel/signal.py
[ { "identifier": "_", "path": "cdl/config.py", "snippet": "CONF_VERSION = \"1.0.0\"\nAPP_NAME = \"DataLab\"\nMOD_NAME = \"cdl\"\nAPP_DESC = _(\"\"\"DataLab is a generic signal and image processing platform\"\"\")\nAPP_PATH = osp.dirname(__file__)\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\", \"true\")\nTEST_SEGFAULT_ERROR = len(os.environ.get(\"TEST_SEGFAULT_ERROR\", \"\")) > 0\nDATETIME_FORMAT = \"%d/%m/%Y - %H:%M:%S\"\nDATAPATH = configtools.get_module_data_path(MOD_NAME, \"data\")\nSHOTPATH = osp.join(\n configtools.get_module_data_path(MOD_NAME), os.pardir, \"doc\", \"images\", \"shots\"\n)\nOTHER_PLUGINS_PATHLIST = [configtools.get_module_data_path(MOD_NAME, \"plugins\")]\nIS_FROZEN = is_frozen(MOD_NAME)\nPLOTPY_DEFAULTS = {\n \"plot\": {\n # \"antialiasing\": False,\n # \"title/font/size\": 12,\n # \"title/font/bold\": False,\n # \"marker/curve/text/font/size\": 8,\n # \"marker/curve/text/font/family\": \"default\",\n # \"marker/curve/text/font/bold\": False,\n # \"marker/curve/text/font/italic\": False,\n \"marker/curve/text/textcolor\": \"black\",\n # \"marker/curve/text/background_color\": \"#ffffff\",\n # \"marker/curve/text/background_alpha\": 0.8,\n # \"marker/cross/text/font/family\": \"default\",\n # \"marker/cross/text/font/size\": 8,\n # \"marker/cross/text/font/bold\": False,\n # \"marker/cross/text/font/italic\": False,\n \"marker/cross/text/textcolor\": \"black\",\n # \"marker/cross/text/background_color\": \"#ffffff\",\n \"marker/cross/text/background_alpha\": 0.7,\n # \"marker/cross/line/style\": \"DashLine\",\n # \"marker/cross/line/color\": \"yellow\",\n # \"marker/cross/line/width\": 1,\n # \"marker/cursor/text/font/size\": 8,\n # \"marker/cursor/text/font/family\": \"default\",\n # \"marker/cursor/text/font/bold\": False,\n # \"marker/cursor/text/font/italic\": False,\n # \"marker/cursor/text/textcolor\": \"#ff9393\",\n # \"marker/cursor/text/background_color\": \"#ffffff\",\n # \"marker/cursor/text/background_alpha\": 0.8,\n \"shape/drag/symbol/marker\": \"NoSymbol\",\n \"shape/mask/symbol/size\": 5,\n \"shape/mask/sel_symbol/size\": 8,\n # -----------------------------------------------------------------------------\n # Annotated shape style for annotations:\n \"shape/annotation/line/style\": \"SolidLine\",\n \"shape/annotation/line/color\": \"#ffff00\",\n \"shape/annotation/line/width\": 1,\n \"shape/annotation/fill/style\": \"SolidPattern\",\n \"shape/annotation/fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/fill/alpha\": 0.1,\n \"shape/annotation/symbol/marker\": \"Rect\",\n \"shape/annotation/symbol/size\": 3,\n \"shape/annotation/symbol/edgecolor\": \"#ffff00\",\n \"shape/annotation/symbol/facecolor\": \"#ffff00\",\n \"shape/annotation/symbol/alpha\": 1.0,\n \"shape/annotation/sel_line/style\": \"SolidLine\",\n \"shape/annotation/sel_line/color\": \"#00ff00\",\n \"shape/annotation/sel_line/width\": 1,\n \"shape/annotation/sel_fill/style\": \"SolidPattern\",\n \"shape/annotation/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/sel_fill/alpha\": 0.1,\n \"shape/annotation/sel_symbol/marker\": \"Rect\",\n \"shape/annotation/sel_symbol/size\": 9,\n \"shape/annotation/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/annotation/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/annotation/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / signals:\n \"shape/result/s/line/style\": \"SolidLine\",\n \"shape/result/s/line/color\": MAIN_FG_COLOR,\n 
\"shape/result/s/line/width\": 1,\n \"shape/result/s/fill/style\": \"SolidPattern\",\n \"shape/result/s/fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/fill/alpha\": 0.1,\n \"shape/result/s/symbol/marker\": \"XCross\",\n \"shape/result/s/symbol/size\": 7,\n \"shape/result/s/symbol/edgecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/facecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/alpha\": 1.0,\n \"shape/result/s/sel_line/style\": \"SolidLine\",\n \"shape/result/s/sel_line/color\": \"#00ff00\",\n \"shape/result/s/sel_line/width\": 1,\n \"shape/result/s/sel_fill/style\": \"SolidPattern\",\n \"shape/result/s/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/sel_fill/alpha\": 0.1,\n \"shape/result/s/sel_symbol/marker\": \"Rect\",\n \"shape/result/s/sel_symbol/size\": 9,\n \"shape/result/s/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/s/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/s/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / images:\n \"shape/result/i/line/style\": \"SolidLine\",\n \"shape/result/i/line/color\": \"#ffff00\",\n \"shape/result/i/line/width\": 1,\n \"shape/result/i/fill/style\": \"SolidPattern\",\n \"shape/result/i/fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/fill/alpha\": 0.1,\n \"shape/result/i/symbol/marker\": \"Rect\",\n \"shape/result/i/symbol/size\": 3,\n \"shape/result/i/symbol/edgecolor\": \"#ffff00\",\n \"shape/result/i/symbol/facecolor\": \"#ffff00\",\n \"shape/result/i/symbol/alpha\": 1.0,\n \"shape/result/i/sel_line/style\": \"SolidLine\",\n \"shape/result/i/sel_line/color\": \"#00ff00\",\n \"shape/result/i/sel_line/width\": 1,\n \"shape/result/i/sel_fill/style\": \"SolidPattern\",\n \"shape/result/i/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/sel_fill/alpha\": 0.1,\n \"shape/result/i/sel_symbol/marker\": \"Rect\",\n \"shape/result/i/sel_symbol/size\": 9,\n \"shape/result/i/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/i/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/i/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n },\n}\ndef is_frozen(module_name: str) -> bool:\ndef get_mod_source_dir() -> str | None:\n def get_def_dict(cls, category: str) -> dict:\n def set_def_dict(cls, category: str, def_dict: dict) -> None:\ndef get_old_log_fname(fname):\ndef initialize():\ndef reset():\nclass MainSection(conf.Section, metaclass=conf.SectionMeta):\nclass ConsoleSection(conf.Section, metaclass=conf.SectionMeta):\nclass IOSection(conf.Section, metaclass=conf.SectionMeta):\nclass ProcSection(conf.Section, metaclass=conf.SectionMeta):\nclass ViewSection(conf.Section, metaclass=conf.SectionMeta):\nclass Conf(conf.Configuration, metaclass=conf.ConfMeta):" }, { "identifier": "roieditor", "path": "cdl/core/gui/roieditor.py", "snippet": "class BaseROIEditorMeta(type(QW.QWidget), abc.ABCMeta):\nclass BaseROIEditor(QW.QWidget, metaclass=BaseROIEditorMeta):\nclass ROIRangeInfo(ObjectInfo):\nclass SignalROIEditor(BaseROIEditor):\nclass ImageROIEditor(BaseROIEditor):\n ICON_NAME = None\n OBJ_NAME = None\n ICON_NAME = \"signal_roi_new.svg\"\n OBJ_NAME = _(\"signal\")\n ICON_NAME = \"image_roi_new.svg\"\n OBJ_NAME = _(\"image\")\n def __init__(\n self,\n parent: QW.QDialog,\n obj: BaseObj,\n extract: bool,\n singleobj: bool | None = None,\n ):\n def modified(self) -> bool:\n def modified(self, value: bool):\n def dialog_accepted(self):\n def get_data(self) -> ROIDataParam:\n 
def setup_widget(self):\n def add_roi_item(self, roi_item):\n def update_roi_titles(self):\n def item_removed(self, item):\n def item_moved(self):\n def get_roi_item_coords(roi_item):\n def __init__(self, roi_items):\n def get_text(self):\n def setup_widget(self):\n def add_roi(self):\n def update_roi_titles(self):\n def get_roi_item_coords(roi_item):\n def setup_widget(self):\n def add_roi(self, geometry: RoiDataGeometries):\n def update_roi_titles(self):\n def get_roi_item_coords(roi_item):" }, { "identifier": "SignalActionHandler", "path": "cdl/core/gui/actionhandler.py", "snippet": "class SignalActionHandler(BaseActionHandler):\n \"\"\"Object handling signal panel GUI interactions: actions, menus, ...\"\"\"\n\n OBJECT_STR = _(\"signal\")\n\n def create_first_actions(self):\n \"\"\"Create actions that are added to the menus in the first place\"\"\"\n with self.new_category(ActionCategory.PROCESSING):\n self.new_action(\n _(\"Normalize\"), triggered=self.panel.processor.compute_normalize\n )\n self.new_action(\n _(\"Derivative\"), triggered=self.panel.processor.compute_derivative\n )\n self.new_action(\n _(\"Integral\"), triggered=self.panel.processor.compute_integral\n )\n\n super().create_first_actions()\n\n with self.new_category(ActionCategory.OPERATION):\n self.new_action(\n _(\"Peak detection\"),\n separator=True,\n triggered=self.panel.processor.compute_peak_detection,\n icon=get_icon(\"peak_detect.svg\"),\n )\n\n with self.new_category(ActionCategory.PROCESSING):\n self.new_action(\n _(\"Interpolation\"),\n triggered=self.panel.processor.compute_interpolation,\n )\n self.new_action(\n _(\"Resampling\"), triggered=self.panel.processor.compute_resampling\n )\n self.new_action(\n _(\"Detrending\"), triggered=self.panel.processor.compute_detrending\n )\n\n def cra_fit(title, fitdlgfunc):\n \"\"\"Create curve fitting action\"\"\"\n return self.new_action(\n title,\n triggered=lambda: self.panel.processor.compute_fit(title, fitdlgfunc),\n )\n\n with self.new_category(ActionCategory.PROCESSING):\n with self.new_menu(_(\"Fitting\")):\n cra_fit(_(\"Gaussian fit\"), fitdialog.gaussianfit)\n cra_fit(_(\"Lorentzian fit\"), fitdialog.lorentzianfit)\n cra_fit(_(\"Voigt fit\"), fitdialog.voigtfit)\n self.new_action(\n _(\"Polynomial fit\"),\n triggered=self.panel.processor.compute_polyfit,\n )\n self.new_action(\n _(\"Multi-Gaussian fit\"),\n triggered=self.panel.processor.compute_multigaussianfit,\n )\n\n with self.new_category(ActionCategory.COMPUTING):\n self.new_action(\n _(\"Full width at half-maximum\"),\n triggered=self.panel.processor.compute_fwhm,\n tip=_(\"Compute Full Width at Half-Maximum (FWHM)\"),\n )\n self.new_action(\n _(\"Full width at\") + \" 1/e²\",\n triggered=self.panel.processor.compute_fw1e2,\n tip=_(\"Compute Full Width at Maximum\") + \"/e²\",\n )\n\n with self.new_category(ActionCategory.VIEW):\n antialiasing_action = self.new_action(\n _(\"Curve anti-aliasing\"),\n icon=get_icon(\"curve_antialiasing.svg\"),\n toggled=self.panel.toggle_anti_aliasing,\n tip=_(\"Toggle curve anti-aliasing on/off (may slow down plotting)\"),\n toolbar_pos=-1,\n )\n antialiasing_action.setChecked(Conf.view.sig_antialiasing.get(True))\n\n def create_last_actions(self):\n \"\"\"Create actions that are added to the menus in the end\"\"\"\n with self.new_category(ActionCategory.OPERATION):\n self.new_action(\n _(\"Convolution\"),\n triggered=self.panel.processor.compute_convolution,\n separator=True,\n )\n super().create_last_actions()" }, { "identifier": "BaseDataPanel", "path": 
"cdl/core/gui/panel/base.py", "snippet": "class BaseDataPanel(AbstractPanel):\n \"\"\"Object handling the item list, the selected item properties and plot\"\"\"\n\n PANEL_STR = \"\" # e.g. \"Signal Panel\"\n PARAMCLASS: SignalObj | ImageObj = None # Replaced in child object\n ANNOTATION_TOOLS = ()\n DIALOGSIZE = (800, 600)\n # Replaced by the right class in child object:\n IO_REGISTRY: SignalIORegistry | ImageIORegistry | None = None\n SIG_STATUS_MESSAGE = QC.Signal(str) # emitted by \"qt_try_except\" decorator\n SIG_REFRESH_PLOT = QC.Signal(str, bool) # Connected to PlotHandler.refresh_plot\n ROIDIALOGOPTIONS = {}\n # Replaced in child object:\n ROIDIALOGCLASS: roieditor.SignalROIEditor | roieditor.ImageROIEditor | None = None\n\n @abc.abstractmethod\n def __init__(self, parent: QW.QWidget, plotwidget: PlotWidget, toolbar) -> None:\n super().__init__(parent)\n self.mainwindow: CDLMainWindow = parent\n self.objprop = ObjectProp(self, self.PARAMCLASS)\n self.objmodel = objectmodel.ObjectModel()\n self.objview = objectview.ObjectView(self, self.objmodel)\n self.objview.SIG_IMPORT_FILES.connect(self.handle_dropped_files)\n self.objview.populate_tree()\n self.plothandler: SignalPlotHandler | ImagePlotHandler = None\n self.processor: SignalProcessor | ImageProcessor = None\n self.acthandler: actionhandler.BaseActionHandler = None\n self.__metadata_clipboard = {}\n self.context_menu = QW.QMenu()\n self.__separate_views: dict[QW.QDialog, SignalObj | ImageObj] = {}\n\n def closeEvent(self, event):\n \"\"\"Reimplement QMainWindow method\"\"\"\n self.processor.close()\n super().closeEvent(event)\n\n # ------AbstractPanel interface-----------------------------------------------------\n def serialize_object_to_hdf5(\n self, obj: SignalObj | ImageObj, writer: NativeH5Writer\n ) -> None:\n \"\"\"Serialize object to HDF5 file\"\"\"\n # Before serializing, update metadata from plot item parameters, in order to\n # save the latest visualization settings:\n try:\n item = self.plothandler[obj.uuid]\n obj.update_metadata_from_plot_item(item)\n except KeyError:\n # Plot item has not been created yet (this happens when auto-refresh has\n # been disabled)\n pass\n super().serialize_object_to_hdf5(obj, writer)\n\n def serialize_to_hdf5(self, writer: NativeH5Writer) -> None:\n \"\"\"Serialize whole panel to a HDF5 file\"\"\"\n with writer.group(self.H5_PREFIX):\n for group in self.objmodel.get_groups():\n with writer.group(self.get_serializable_name(group)):\n with writer.group(\"title\"):\n writer.write_str(group.title)\n for obj in group.get_objects():\n self.serialize_object_to_hdf5(obj, writer)\n\n def deserialize_from_hdf5(self, reader: NativeH5Reader) -> None:\n \"\"\"Deserialize whole panel from a HDF5 file\"\"\"\n with reader.group(self.H5_PREFIX):\n for name in reader.h5.get(self.H5_PREFIX, []):\n with reader.group(name):\n group = self.add_group(\"\")\n with reader.group(\"title\"):\n group.title = reader.read_str()\n for obj_name in reader.h5.get(f\"{self.H5_PREFIX}/{name}\", []):\n obj = self.deserialize_object_from_hdf5(reader, obj_name)\n self.add_object(obj, group.uuid, set_current=False)\n self.selection_changed()\n\n def __len__(self) -> int:\n \"\"\"Return number of objects\"\"\"\n return len(self.objmodel)\n\n def __getitem__(self, nb: int) -> SignalObj | ImageObj:\n \"\"\"Return object from its number (1 to N)\"\"\"\n return self.objmodel.get_object_from_number(nb)\n\n def __iter__(self):\n \"\"\"Iterate over objects\"\"\"\n return iter(self.objmodel)\n\n def create_object(self) -> SignalObj | 
ImageObj:\n \"\"\"Create object (signal or image)\n\n Returns:\n SignalObj or ImageObj object\n \"\"\"\n return self.PARAMCLASS() # pylint: disable=not-callable\n\n @qt_try_except()\n def add_object(\n self,\n obj: SignalObj | ImageObj,\n group_id: str | None = None,\n set_current: bool = True,\n ) -> None:\n \"\"\"Add object\n\n Args:\n obj: SignalObj or ImageObj object\n group_id: group id\n set_current: if True, set the added object as current\n \"\"\"\n if obj in self.objmodel:\n # Prevent adding the same object twice\n raise ValueError(\n f\"Object {hex(id(obj))} already in panel. \"\n f\"The same object cannot be added twice: \"\n f\"please use a copy of the object.\"\n )\n if group_id is None:\n group_id = self.objview.get_current_group_id()\n if group_id is None:\n groups = self.objmodel.get_groups()\n if groups:\n group_id = groups[0].uuid\n else:\n group_id = self.add_group(\"\").uuid\n obj.check_data()\n self.objmodel.add_object(obj, group_id)\n self.objview.add_object_item(obj, group_id, set_current=set_current)\n self.SIG_OBJECT_ADDED.emit()\n self.objview.update_tree()\n\n def remove_all_objects(self) -> None:\n \"\"\"Remove all objects\"\"\"\n # iterate over a copy of self.__separate_views dict keys to avoid RuntimeError:\n # dictionary changed size during iteration\n for dlg in list(self.__separate_views):\n dlg.done(QW.QDialog.DialogCode.Rejected)\n self.objmodel.clear()\n self.plothandler.clear()\n self.objview.populate_tree()\n self.SIG_REFRESH_PLOT.emit(\"selected\", True)\n super().remove_all_objects()\n\n # ---- Signal/Image Panel API ------------------------------------------------------\n def setup_panel(self) -> None:\n \"\"\"Setup panel\"\"\"\n self.acthandler.create_all_actions()\n self.processor.SIG_ADD_SHAPE.connect(self.plothandler.add_shapes)\n self.SIG_REFRESH_PLOT.connect(self.plothandler.refresh_plot)\n self.objview.SIG_SELECTION_CHANGED.connect(self.selection_changed)\n self.objview.SIG_ITEM_DOUBLECLICKED.connect(\n lambda oid: self.open_separate_view([oid])\n )\n self.objview.SIG_CONTEXT_MENU.connect(self.__popup_contextmenu)\n self.objprop.properties.SIG_APPLY_BUTTON_CLICKED.connect(\n self.properties_changed\n )\n self.addWidget(self.objview)\n self.addWidget(self.objprop)\n self.add_results_button()\n\n def get_category_actions(\n self, category: actionhandler.ActionCategory\n ) -> list[QW.QAction]: # pragma: no cover\n \"\"\"Return actions for category\"\"\"\n return self.acthandler.feature_actions.get(category, [])\n\n def __popup_contextmenu(self, position: QC.QPoint) -> None: # pragma: no cover\n \"\"\"Popup context menu at position\"\"\"\n # Note: For now, this is completely unnecessary to clear context menu everytime,\n # but implementing it this way could be useful in the future in menu contents\n # should take into account current object selection\n self.context_menu.clear()\n actions = self.get_category_actions(actionhandler.ActionCategory.CONTEXT_MENU)\n add_actions(self.context_menu, actions)\n self.context_menu.popup(position)\n\n # ------Creating, adding, removing objects------------------------------------------\n def add_group(self, title: str) -> objectmodel.ObjectGroup:\n \"\"\"Add group\"\"\"\n group = self.objmodel.add_group(title)\n self.objview.add_group_item(group)\n return group\n\n # TODO: [P2] New feature: move objects up/down\n # TODO: [P2] New feature: move objects to another group\n def __duplicate_individual_obj(\n self, oid: str, new_group_id: str | None = None, set_current: bool = True\n ) -> None:\n \"\"\"Duplicate 
individual object\"\"\"\n obj = self.objmodel[oid]\n if new_group_id is None:\n new_group_id = self.objmodel.get_object_group_id(obj)\n self.add_object(obj.copy(), group_id=new_group_id, set_current=set_current)\n\n def duplicate_object(self) -> None:\n \"\"\"Duplication signal/image object\"\"\"\n if not self.mainwindow.confirm_memory_state():\n return\n # Duplicate individual objects (exclusive with respect to groups)\n for oid in self.objview.get_sel_object_uuids():\n self.__duplicate_individual_obj(oid, set_current=False)\n # Duplicate groups (exclusive with respect to individual objects)\n for group in self.objview.get_sel_groups():\n new_group = self.add_group(group.title)\n for oid in self.objmodel.get_group_object_ids(group.uuid):\n self.__duplicate_individual_obj(oid, new_group.uuid, set_current=False)\n self.selection_changed(update_items=True)\n\n def copy_metadata(self) -> None:\n \"\"\"Copy object metadata\"\"\"\n obj = self.objview.get_sel_objects()[0]\n self.__metadata_clipboard = obj.metadata.copy()\n new_pref = obj.short_id + \"_\"\n for key, value in obj.metadata.items():\n if ResultShape.match(key, value):\n mshape = ResultShape.from_metadata_entry(key, value)\n if not re.match(obj.PREFIX + r\"[0-9]{3}[\\s]*\", mshape.label):\n # Handling additional result (e.g. diameter)\n for a_key, a_value in obj.metadata.items():\n if isinstance(a_key, str) and a_key.startswith(mshape.label):\n self.__metadata_clipboard.pop(a_key)\n self.__metadata_clipboard[new_pref + a_key] = a_value\n mshape.label = new_pref + mshape.label\n # Handling result shape\n self.__metadata_clipboard.pop(key)\n self.__metadata_clipboard[mshape.key] = value\n\n def paste_metadata(self) -> None:\n \"\"\"Paste metadata to selected object(s)\"\"\"\n sel_objects = self.objview.get_sel_objects(include_groups=True)\n for obj in sorted(sel_objects, key=lambda obj: obj.short_id, reverse=True):\n obj.metadata.update(self.__metadata_clipboard)\n self.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n def remove_object(self) -> None:\n \"\"\"Remove signal/image object\"\"\"\n sel_groups = self.objview.get_sel_groups()\n if sel_groups:\n answer = QW.QMessageBox.warning(\n self,\n _(\"Delete group(s)\"),\n _(\"Are you sure you want to delete the selected group(s)?\"),\n QW.QMessageBox.Yes | QW.QMessageBox.No,\n )\n if answer == QW.QMessageBox.No:\n return\n sel_objects = self.objview.get_sel_objects(include_groups=True)\n for obj in sorted(sel_objects, key=lambda obj: obj.short_id, reverse=True):\n for dlg, obj_i in self.__separate_views.items():\n if obj_i is obj:\n dlg.done(QW.QDialog.DialogCode.Rejected)\n self.plothandler.remove_item(obj.uuid)\n self.objview.remove_item(obj.uuid, refresh=False)\n self.objmodel.remove_object(obj)\n for group in sel_groups:\n self.objview.remove_item(group.uuid, refresh=False)\n self.objmodel.remove_group(group)\n self.objview.update_tree()\n self.selection_changed(update_items=True)\n self.SIG_OBJECT_REMOVED.emit()\n\n def delete_all_objects(self) -> None: # pragma: no cover\n \"\"\"Confirm before removing all objects\"\"\"\n if len(self) == 0:\n return\n answer = QW.QMessageBox.warning(\n self,\n _(\"Delete all\"),\n _(\"Do you want to delete all objects (%s)?\") % self.PANEL_STR,\n QW.QMessageBox.Yes | QW.QMessageBox.No,\n )\n if answer == QW.QMessageBox.Yes:\n self.remove_all_objects()\n\n def delete_metadata(self, refresh_plot: bool = True) -> None:\n \"\"\"Delete metadata of selected objects\n\n Args:\n refresh_plot (bool | None): Refresh plot. 
Defaults to True.\n \"\"\"\n for index, obj in enumerate(self.objview.get_sel_objects(include_groups=True)):\n obj.reset_metadata_to_defaults()\n if index == 0:\n self.selection_changed()\n if refresh_plot:\n self.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n def add_annotations_from_items(\n self, items: list, refresh_plot: bool = True\n ) -> None:\n \"\"\"Add object annotations (annotation plot items).\n\n Args:\n items (list): annotation plot items\n refresh_plot (bool | None): refresh plot. Defaults to True.\n \"\"\"\n for obj in self.objview.get_sel_objects(include_groups=True):\n obj.add_annotations_from_items(items)\n if refresh_plot:\n self.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n def update_metadata_view_settings(self) -> None:\n \"\"\"Update metadata view settings\"\"\"\n for obj in self.objmodel:\n obj.update_metadata_view_settings()\n self.SIG_REFRESH_PLOT.emit(\"all\", True)\n\n def copy_titles_to_clipboard(self) -> None:\n \"\"\"Copy object titles to clipboard (for reproducibility)\"\"\"\n QW.QApplication.clipboard().setText(str(self.objview))\n\n def new_group(self) -> None:\n \"\"\"Create a new group\"\"\"\n # Open a message box to enter the group name\n group_name, ok = QW.QInputDialog.getText(self, _(\"New group\"), _(\"Group name:\"))\n if ok:\n self.add_group(group_name)\n\n def rename_group(self) -> None:\n \"\"\"Rename a group\"\"\"\n # Open a message box to enter the group name\n group = self.objview.get_sel_groups()[0]\n group_name, ok = QW.QInputDialog.getText(\n self, _(\"Rename group\"), _(\"Group name:\"), QW.QLineEdit.Normal, group.title\n )\n if ok:\n group.title = group_name\n self.objview.update_item(group.uuid)\n\n @abc.abstractmethod\n def get_newparam_from_current(\n self, newparam: NewSignalParam | NewImageParam | None = None\n ) -> NewSignalParam | NewImageParam | None:\n \"\"\"Get new object parameters from the current object.\n\n Args:\n newparam (guidata.dataset.DataSet): new object parameters.\n If None, create a new one.\n\n Returns:\n New object parameters\n \"\"\"\n\n @abc.abstractmethod\n def new_object(\n self,\n newparam: NewSignalParam | NewImageParam | None = None,\n addparam: gds.DataSet | None = None,\n edit: bool = True,\n add_to_panel: bool = True,\n ) -> SignalObj | ImageObj | None:\n \"\"\"Create a new object (signal/image).\n\n Args:\n newparam (guidata.dataset.DataSet): new object parameters\n addparam (guidata.dataset.DataSet): additional parameters\n edit (bool): Open a dialog box to edit parameters (default: True)\n add_to_panel (bool): Add object to panel (default: True)\n\n Returns:\n New object\n \"\"\"\n\n def set_current_object_title(self, title: str) -> None:\n \"\"\"Set current object title\"\"\"\n obj = self.objview.get_current_object()\n obj.title = title\n self.objview.update_item(obj.uuid)\n\n def open_object(\n self, filename: str\n ) -> SignalObj | ImageObj | list[SignalObj | ImageObj]:\n \"\"\"Open object from file (signal/image), add it to DataLab and return it.\n\n Args:\n filename (str): file name\n\n Returns:\n New object or list of new objects\n \"\"\"\n obj_or_objlist = self.IO_REGISTRY.read(filename)\n objs = obj_or_objlist if isinstance(obj_or_objlist, list) else [obj_or_objlist]\n for obj in objs:\n self.add_object(obj, set_current=obj is objs[-1])\n self.selection_changed()\n if len(objs) == 1:\n return objs[0]\n return objs\n\n def save_object(self, obj, filename: str | None = None) -> None:\n \"\"\"Save object to file (signal/image)\"\"\"\n if filename is None:\n basedir = Conf.main.base_dir.get()\n 
filters = self.IO_REGISTRY.get_filters(IOAction.SAVE)\n with save_restore_stds():\n filename, _filt = getsavefilename(self, _(\"Save as\"), basedir, filters)\n if filename:\n with qt_try_loadsave_file(self.parent(), filename, \"save\"):\n Conf.main.base_dir.set(filename)\n self.IO_REGISTRY.write(filename, obj)\n\n def handle_dropped_files(self, filenames: list[str] | None = None) -> None:\n \"\"\"Handle dropped files\n\n Args:\n filenames (list(str)): File names\n\n Returns:\n None\n \"\"\"\n h5_fnames = [fname for fname in filenames if fname.endswith(\".h5\")]\n other_fnames = list(set(filenames) - set(h5_fnames))\n if h5_fnames:\n self.mainwindow.open_h5_files(h5_fnames, import_all=True)\n if other_fnames:\n self.open_objects(other_fnames)\n\n def open_objects(\n self, filenames: list[str] | None = None\n ) -> list[SignalObj | ImageObj]:\n \"\"\"Open objects from file (signals/images), add them to DataLab and return them.\n\n Args:\n filenames (list(str)): File names\n\n Returns:\n list of new objects\n \"\"\"\n if not self.mainwindow.confirm_memory_state():\n return []\n if filenames is None: # pragma: no cover\n basedir = Conf.main.base_dir.get()\n filters = self.IO_REGISTRY.get_filters(IOAction.LOAD)\n with save_restore_stds():\n filenames, _filt = getopenfilenames(self, _(\"Open\"), basedir, filters)\n objs = []\n for filename in filenames:\n with qt_try_loadsave_file(self.parent(), filename, \"load\"):\n Conf.main.base_dir.set(filename)\n objs.append(self.open_object(filename))\n return objs\n\n def save_objects(self, filenames: list[str] | None = None) -> None:\n \"\"\"Save selected objects to file (signal/image).\n\n Args:\n filenames (list(str)): File names\n\n Returns:\n None\n \"\"\"\n objs = self.objview.get_sel_objects(include_groups=True)\n if filenames is None: # pragma: no cover\n filenames = [None] * len(objs)\n assert len(filenames) == len(objs)\n for index, obj in enumerate(objs):\n filename = filenames[index]\n self.save_object(obj, filename)\n\n def import_metadata_from_file(self, filename: str | None = None) -> None:\n \"\"\"Import metadata from file (JSON).\n\n Args:\n filename (str): File name\n\n Returns:\n None\n \"\"\"\n if filename is None: # pragma: no cover\n basedir = Conf.main.base_dir.get()\n with save_restore_stds():\n filename, _filter = getopenfilename(\n self, _(\"Import metadata\"), basedir, \"*.json\"\n )\n if filename:\n with qt_try_loadsave_file(self.parent(), filename, \"load\"):\n Conf.main.base_dir.set(filename)\n obj = self.objview.get_sel_objects(include_groups=True)[0]\n obj.import_metadata_from_file(filename)\n self.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n def export_metadata_from_file(self, filename: str | None = None) -> None:\n \"\"\"Export metadata to file (JSON).\n\n Args:\n filename (str): File name\n\n Returns:\n None\n \"\"\"\n obj = self.objview.get_sel_objects(include_groups=True)[0]\n if filename is None: # pragma: no cover\n basedir = Conf.main.base_dir.get()\n with save_restore_stds():\n filename, _filt = getsavefilename(\n self, _(\"Export metadata\"), basedir, \"*.json\"\n )\n if filename:\n with qt_try_loadsave_file(self.parent(), filename, \"save\"):\n Conf.main.base_dir.set(filename)\n obj.export_metadata_to_file(filename)\n\n # ------Refreshing GUI--------------------------------------------------------------\n def selection_changed(self, update_items: bool = False) -> None:\n \"\"\"Object selection changed: update object properties, refresh plot and update\n object view.\n\n Args:\n update_items (bool): Update plot 
items (default: False)\n \"\"\"\n selected_objects = self.objview.get_sel_objects(include_groups=True)\n selected_groups = self.objview.get_sel_groups()\n self.objprop.update_properties_from(self.objview.get_current_object())\n self.acthandler.selected_objects_changed(selected_groups, selected_objects)\n self.SIG_REFRESH_PLOT.emit(\"selected\", update_items)\n\n def properties_changed(self) -> None:\n \"\"\"The properties 'Apply' button was clicked: update object properties,\n refresh plot and update object view.\"\"\"\n obj = self.objview.get_current_object()\n update_dataset(obj, self.objprop.properties.dataset)\n self.objview.update_item(obj.uuid)\n self.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n # ------Plotting data in modal dialogs----------------------------------------------\n def open_separate_view(self, oids: list[str] | None = None) -> QW.QDialog | None:\n \"\"\"\n Open separate view for visualizing selected objects\n\n Args:\n oids (list(str)): Object IDs\n\n Returns:\n QDialog instance\n \"\"\"\n title = _(\"Annotations\")\n if oids is None:\n oids = self.objview.get_sel_object_uuids(include_groups=True)\n obj = self.objmodel[oids[0]]\n dlg = self.create_new_dialog(oids, edit=True, name=\"new_window\")\n if dlg is None:\n return None\n width, height = self.DIALOGSIZE\n dlg.resize(width, height)\n mgr = dlg.get_manager()\n mgr.get_itemlist_panel().show()\n toolbar = QW.QToolBar(title, self)\n dlg.button_layout.insertWidget(0, toolbar)\n # dlg.layout().insertWidget(1, toolbar) # other possible location\n # dlg.plot_layout.addWidget(toolbar, 1, 0, 1, 1) # other possible location\n mgr.add_toolbar(toolbar, id(toolbar))\n toolbar.setToolButtonStyle(QC.Qt.ToolButtonTextUnderIcon)\n for tool in self.ANNOTATION_TOOLS:\n mgr.add_tool(tool, toolbar_id=id(toolbar))\n plot = dlg.get_plot()\n plot.unselect_all()\n for item in plot.items:\n item.set_selectable(False)\n for item in obj.iterate_shape_items(editable=True):\n plot.add_item(item)\n self.__separate_views[dlg] = obj\n dlg.show()\n dlg.finished.connect(self.__separate_view_finished)\n return dlg\n\n def __separate_view_finished(self, result: int) -> None:\n \"\"\"Separate view was closed\n\n Args:\n result: result\n \"\"\"\n dlg: PlotDialog = self.sender()\n if result == QW.QDialog.DialogCode.Accepted:\n rw_items = []\n for item in dlg.get_plot().get_items():\n if not item.is_readonly() and is_plot_item_serializable(item):\n rw_items.append(item)\n obj = self.__separate_views[dlg]\n obj.annotations = items_to_json(rw_items)\n self.selection_changed(update_items=True)\n self.__separate_views.pop(dlg)\n dlg.deleteLater()\n\n def manual_refresh(self) -> None:\n \"\"\"Manual refresh\"\"\"\n self.plothandler.refresh_plot(\"selected\", True, force=True)\n\n def create_new_dialog(\n self,\n oids: list[str],\n edit: bool = False,\n toolbar: bool = True,\n title: str | None = None,\n tools: list[GuiTool] | None = None,\n name: str | None = None,\n options: dict | None = None,\n ) -> PlotDialog | None:\n \"\"\"Create new pop-up signal/image plot dialog.\n\n Args:\n oids (list(str)): Object IDs\n edit (bool): Edit mode\n toolbar (bool): Show toolbar\n title (str): Dialog title\n tools (list(GuiTool)): list of tools to add to the toolbar\n name (str): Dialog name\n options (dict): Plot options\n\n Returns:\n QDialog instance\n \"\"\"\n if title is not None or len(oids) == 1:\n if title is None:\n title = self.objview.get_sel_objects(include_groups=True)[0].title\n title = f\"{title} - {APP_NAME}\"\n else:\n title = APP_NAME\n\n plot_options 
= self.plothandler.get_current_plot_options()\n if options is not None:\n plot_options = plot_options.copy(options)\n\n # pylint: disable=not-callable\n dlg = PlotDialog(\n parent=self,\n title=title,\n edit=edit,\n options=plot_options,\n toolbar=toolbar,\n )\n dlg.setWindowIcon(get_icon(\"DataLab.svg\"))\n if tools is not None:\n for tool in tools:\n dlg.get_manager().add_tool(tool)\n plot = dlg.get_plot()\n\n objs = self.objmodel.get_objects(oids)\n dlg.setObjectName(f\"{objs[0].PREFIX}_{name}\")\n\n with create_progress_bar(\n self, _(\"Creating plot items\"), max_=len(objs)\n ) as progress:\n for index, obj in enumerate(objs):\n progress.setValue(index + 1)\n QW.QApplication.processEvents()\n if progress.wasCanceled():\n return None\n item = obj.make_item(update_from=self.plothandler[obj.uuid])\n item.set_readonly(True)\n plot.add_item(item, z=0)\n plot.set_active_item(item)\n plot.replot()\n return dlg\n\n def create_new_dialog_for_selection(\n self,\n title: str,\n name: str,\n options: dict[str, any] = None,\n toolbar: bool = False,\n tools: list[GuiTool] = None,\n ) -> tuple[QW.QDialog | None, SignalObj | ImageObj]:\n \"\"\"Create new pop-up dialog for the currently selected signal/image.\n\n Args:\n title (str): Dialog title\n name (str): Dialog name\n options (dict): Plot options\n toolbar (bool): Show toolbar\n tools (list(GuiTool)): list of tools to add to the toolbar\n\n Returns:\n QDialog instance, selected object\n \"\"\"\n obj = self.objview.get_sel_objects(include_groups=True)[0]\n dlg = self.create_new_dialog(\n [obj.uuid],\n edit=True,\n toolbar=toolbar,\n title=f\"{title} - {obj.title}\",\n tools=tools,\n name=name,\n options=options,\n )\n return dlg, obj\n\n def get_roi_dialog(\n self, extract: bool, singleobj: bool\n ) -> cdl.core.computation.base.ROIDataParam:\n \"\"\"Get ROI data (array) from specific dialog box.\n\n Args:\n extract (bool): Extract ROI from data\n singleobj (bool): Single object\n\n Returns:\n ROI data\n \"\"\"\n roi_s = _(\"Regions of interest\")\n options = self.ROIDIALOGOPTIONS\n dlg, obj = self.create_new_dialog_for_selection(roi_s, \"roi_dialog\", options)\n if dlg is None:\n return None\n plot = dlg.get_plot()\n plot.unselect_all()\n for item in plot.items:\n item.set_selectable(False)\n # pylint: disable=not-callable\n roi_editor = self.ROIDIALOGCLASS(dlg, obj, extract, singleobj)\n dlg.plot_layout.addWidget(roi_editor, 1, 0, 1, 1)\n if exec_dialog(dlg):\n return roi_editor.get_data()\n return None\n\n def get_object_with_dialog(\n self, title: str, parent: QW.QWidget | None = None\n ) -> SignalObj | ImageObj | None:\n \"\"\"Get object with dialog box.\n\n Args:\n title: Dialog title\n parent: Parent widget\n\n Returns:\n Object (signal or image, or None if dialog was canceled)\n \"\"\"\n parent = self if parent is None else parent\n dlg = objectview.GetObjectDialog(parent, self, title)\n if exec_dialog(dlg):\n obj_uuid = dlg.get_current_object_uuid()\n return self.objmodel[obj_uuid]\n return None\n\n def add_results_button(self) -> None:\n \"\"\"Add 'Show results' button\"\"\"\n btn = QW.QPushButton(get_icon(\"show_results.svg\"), _(\"Show results\"), self)\n btn.setToolTip(_(\"Show results obtained from previous computations\"))\n self.objprop.add_button(btn)\n btn.clicked.connect(self.show_results)\n self.acthandler.add_action(\n btn,\n select_condition=actionhandler.SelectCond.at_least_one,\n )\n\n def show_results(self) -> None:\n \"\"\"Show results\"\"\"\n\n @dataclasses.dataclass\n class ResultData:\n \"\"\"Result data associated to 
a shapetype\"\"\"\n\n results: list[ResultShape] = None\n xlabels: list[str] = None\n ylabels: list[str] = None\n\n rdatadict: dict[ShapeTypes, ResultData] = {}\n objs = self.objview.get_sel_objects(include_groups=True)\n for obj in objs:\n for result in obj.iterate_resultshapes():\n rdata = rdatadict.setdefault(result.shapetype, ResultData([], None, []))\n title = f\"{result.label}\"\n rdata.results.append(result)\n rdata.xlabels = result.xlabels\n for _i_row_res in range(result.array.shape[0]):\n ylabel = f\"{obj.short_id}: {result.label}\"\n rdata.ylabels.append(ylabel)\n if rdatadict:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n for rdata in rdatadict.values():\n dlg = ArrayEditor(self.parent())\n title = _(\"Results\")\n dlg.setup_and_check(\n np.vstack([result.array for result in rdata.results]),\n title,\n readonly=True,\n xlabels=rdata.xlabels,\n ylabels=rdata.ylabels,\n )\n dlg.setObjectName(f\"{objs[0].PREFIX}_results\")\n dlg.resize(750, 300)\n exec_dialog(dlg)\n else:\n msg = \"<br>\".join(\n [\n _(\"No result currently available for this object.\"),\n \"\",\n _(\n \"This feature shows result arrays as displayed after \"\n 'calling one of the computing feature (see \"Compute\" menu).'\n ),\n ]\n )\n QW.QMessageBox.information(self, APP_NAME, msg)\n\n def add_label_with_title(self, title: str | None = None) -> None:\n \"\"\"Add a label with object title on the associated plot\n\n Args:\n title (str | None): Label title. Defaults to None.\n If None, the title is the object title.\n \"\"\"\n objs = self.objview.get_sel_objects(include_groups=True)\n for obj in objs:\n obj.add_label_with_title(title=title)\n self.SIG_REFRESH_PLOT.emit(\"selected\", True)" }, { "identifier": "SignalPlotHandler", "path": "cdl/core/gui/plothandler.py", "snippet": "class SignalPlotHandler(BasePlotHandler):\n \"\"\"Object handling signal plot items, plot dialogs, plot options\"\"\"\n\n PLOT_TYPE = PlotType.CURVE\n\n def toggle_anti_aliasing(self, state: bool) -> None:\n \"\"\"Toggle anti-aliasing\n\n Args:\n state: if True, enable anti-aliasing\n \"\"\"\n self.plot.set_antialiasing(state)\n self.plot.replot()" }, { "identifier": "SignalProcessor", "path": "cdl/core/gui/processor/signal.py", "snippet": "class SignalProcessor(BaseProcessor):\n \"\"\"Object handling signal processing: operations, processing, computing\"\"\"\n\n # pylint: disable=duplicate-code\n\n @qt_try_except()\n def compute_sum(self) -> None:\n \"\"\"Compute sum\"\"\"\n self.compute_n1(\"Σ\", cps.compute_add, title=_(\"Sum\"))\n\n @qt_try_except()\n def compute_average(self) -> None:\n \"\"\"Compute average\"\"\"\n\n def func_objs(new_obj: SignalObj, old_objs: list[SignalObj]) -> None:\n \"\"\"Finalize average computation\"\"\"\n new_obj.data = new_obj.data / float(len(old_objs))\n if new_obj.dy is not None:\n new_obj.dy = new_obj.dy / float(len(old_objs))\n\n self.compute_n1(\"μ\", cps.compute_add, func_objs=func_objs, title=_(\"Average\"))\n\n @qt_try_except()\n def compute_product(self) -> None:\n \"\"\"Compute product\"\"\"\n self.compute_n1(\"Π\", cps.compute_product, title=_(\"Product\"))\n\n @qt_try_except()\n def compute_roi_extraction(\n self, param: cdl.param.ROIDataParam | None = None\n ) -> None:\n \"\"\"Extract Region Of Interest (ROI) from data\"\"\"\n param = self._get_roidataparam(param)\n if param is None or param.is_empty:\n return\n obj = self.panel.objview.get_sel_objects()[0]\n group = obj.roidata_to_params(param.roidata)\n if param.singleobj:\n 
self.compute_11(cps.extract_multiple_roi, group, title=_(\"Extract ROI\"))\n else:\n self.compute_1n(cps.extract_single_roi, group.datasets, \"ROI\", edit=False)\n\n @qt_try_except()\n def compute_swap_axes(self) -> None:\n \"\"\"Swap data axes\"\"\"\n self.compute_11(cps.compute_swap_axes, title=_(\"Swap axes\"))\n\n @qt_try_except()\n def compute_abs(self) -> None:\n \"\"\"Compute absolute value\"\"\"\n self.compute_11(cps.compute_abs, title=_(\"Absolute value\"))\n\n @qt_try_except()\n def compute_re(self) -> None:\n \"\"\"Compute real part\"\"\"\n self.compute_11(cps.compute_re, title=_(\"Real part\"))\n\n @qt_try_except()\n def compute_im(self) -> None:\n \"\"\"Compute imaginary part\"\"\"\n self.compute_11(cps.compute_im, title=_(\"Imaginary part\"))\n\n @qt_try_except()\n def compute_astype(self, param: cdl.param.DataTypeSParam | None = None) -> None:\n \"\"\"Convert data type\"\"\"\n self.compute_11(\n cps.compute_astype, param, cps.DataTypeSParam, title=_(\"Convert data type\")\n )\n\n @qt_try_except()\n def compute_log10(self) -> None:\n \"\"\"Compute Log10\"\"\"\n self.compute_11(cps.compute_log10, title=\"Log10\")\n\n @qt_try_except()\n def compute_difference(self, obj2: SignalObj | None = None) -> None:\n \"\"\"Compute difference between two signals\"\"\"\n self.compute_n1n(\n obj2,\n _(\"signal to subtract\"),\n cps.compute_difference,\n title=_(\"Difference\"),\n )\n\n @qt_try_except()\n def compute_quadratic_difference(self, obj2: SignalObj | None = None) -> None:\n \"\"\"Compute quadratic difference between two signals\"\"\"\n self.compute_n1n(\n obj2,\n _(\"signal to subtract\"),\n cps.compute_quadratic_difference,\n title=_(\"Quadratic difference\"),\n )\n\n @qt_try_except()\n def compute_division(self, obj2: SignalObj | None = None) -> None:\n \"\"\"Compute division between two signals\"\"\"\n self.compute_n1n(\n obj2,\n _(\"divider\"),\n cps.compute_division,\n title=_(\"Division\"),\n )\n\n @qt_try_except()\n def compute_peak_detection(\n self, param: cdl.param.PeakDetectionParam | None = None\n ) -> None:\n \"\"\"Detect peaks from data\"\"\"\n obj = self.panel.objview.get_sel_objects()[0]\n edit, param = self.init_param(\n param, cps.PeakDetectionParam, _(\"Peak detection\")\n )\n if edit:\n dlg = signalpeakdialog.SignalPeakDetectionDialog(self.panel)\n dlg.setup_data(obj.x, obj.y)\n if exec_dialog(dlg):\n param.threshold = int(dlg.get_threshold() * 100)\n param.min_dist = dlg.get_min_dist()\n else:\n return\n self.compute_11(cps.compute_peak_detection, param)\n\n # ------Signal Processing\n @qt_try_except()\n def compute_normalize(self, param: cdl.param.NormalizeYParam | None = None) -> None:\n \"\"\"Normalize data\"\"\"\n self.compute_11(\n cps.compute_normalize, param, cps.NormalizeYParam, title=_(\"Normalize\")\n )\n\n @qt_try_except()\n def compute_derivative(self) -> None:\n \"\"\"Compute derivative\"\"\"\n self.compute_11(cps.compute_derivative, title=_(\"Derivative\"))\n\n @qt_try_except()\n def compute_integral(self) -> None:\n \"\"\"Compute integral\"\"\"\n self.compute_11(cps.compute_integral, title=_(\"Integral\"))\n\n @qt_try_except()\n def compute_calibration(\n self, param: cdl.param.XYCalibrateParam | None = None\n ) -> None:\n \"\"\"Compute data linear calibration\"\"\"\n self.compute_11(\n cps.compute_calibration,\n param,\n cps.XYCalibrateParam,\n title=_(\"Linear calibration\"),\n comment=\"y = a.x + b\",\n )\n\n @qt_try_except()\n def compute_threshold(self, param: cpb.ThresholdParam | None = None) -> None:\n \"\"\"Compute threshold 
clipping\"\"\"\n self.compute_11(\n cps.compute_threshold, param, cpb.ThresholdParam, title=_(\"Thresholding\")\n )\n\n @qt_try_except()\n def compute_clip(self, param: cpb.ClipParam | None = None) -> None:\n \"\"\"Compute maximum data clipping\"\"\"\n self.compute_11(cps.compute_clip, param, cpb.ClipParam, title=_(\"Clipping\"))\n\n @qt_try_except()\n def compute_gaussian_filter(self, param: cpb.GaussianParam | None = None) -> None:\n \"\"\"Compute gaussian filter\"\"\"\n self.compute_11(\n cps.compute_gaussian_filter,\n param,\n cpb.GaussianParam,\n title=_(\"Gaussian filter\"),\n )\n\n @qt_try_except()\n def compute_moving_average(\n self, param: cpb.MovingAverageParam | None = None\n ) -> None:\n \"\"\"Compute moving average\"\"\"\n self.compute_11(\n cps.compute_moving_average,\n param,\n cpb.MovingAverageParam,\n title=_(\"Moving average\"),\n )\n\n @qt_try_except()\n def compute_moving_median(self, param: cpb.MovingMedianParam | None = None) -> None:\n \"\"\"Compute moving median\"\"\"\n self.compute_11(\n cps.compute_moving_median,\n param,\n cpb.MovingMedianParam,\n title=_(\"Moving median\"),\n )\n\n @qt_try_except()\n def compute_wiener(self) -> None:\n \"\"\"Compute Wiener filter\"\"\"\n self.compute_11(cps.compute_wiener, title=_(\"Wiener filter\"))\n\n @qt_try_except()\n def compute_fft(self, param: cdl.param.FFTParam | None = None) -> None:\n \"\"\"Compute iFFT\"\"\"\n if param is None:\n param = cpb.FFTParam.create(shift=Conf.proc.fft_shift_enabled.get())\n self.compute_11(cps.compute_fft, param, title=_(\"FFT\"), edit=False)\n\n @qt_try_except()\n def compute_ifft(self, param: cdl.param.FFTParam | None = None) -> None:\n \"\"\"Compute FFT\"\"\"\n if param is None:\n param = cpb.FFTParam.create(shift=Conf.proc.fft_shift_enabled.get())\n self.compute_11(cps.compute_ifft, param, title=_(\"iFFT\"), edit=False)\n\n @qt_try_except()\n def compute_interpolation(\n self,\n obj2: SignalObj | None = None,\n param: cdl.param.InterpolationParam | None = None,\n ):\n \"\"\"Compute interpolation\"\"\"\n self.compute_n1n(\n obj2,\n _(\"signal for X values\"),\n cps.compute_interpolation,\n param,\n cps.InterpolationParam,\n title=_(\"Interpolation\"),\n )\n\n @qt_try_except()\n def compute_resampling(self, param: cdl.param.ResamplingParam | None = None):\n \"\"\"Compute resampling\"\"\"\n edit, param = self.init_param(param, cps.ResamplingParam, _(\"Resampling\"))\n if edit:\n obj = self.panel.objview.get_sel_objects()[0]\n if param.xmin is None:\n param.xmin = obj.x[0]\n if param.xmax is None:\n param.xmax = obj.x[-1]\n if param.dx is None:\n param.dx = obj.x[1] - obj.x[0]\n if param.nbpts is None:\n param.nbpts = len(obj.x)\n self.compute_11(\n cps.compute_resampling,\n param,\n cps.ResamplingParam,\n title=_(\"Resampling\"),\n edit=edit,\n )\n\n @qt_try_except()\n def compute_detrending(self, param: cdl.param.DetrendingParam | None = None):\n \"\"\"Compute detrending\"\"\"\n self.compute_11(\n cps.compute_detrending,\n param,\n cps.DetrendingParam,\n title=_(\"Detrending\"),\n )\n\n @qt_try_except()\n def compute_convolution(self, obj2: SignalObj | None = None) -> None:\n \"\"\"Compute convolution\"\"\"\n self.compute_n1n(\n obj2,\n _(\"signal to convolve with\"),\n cps.compute_convolution,\n title=_(\"Convolution\"),\n )\n\n @qt_try_except()\n def compute_fit(self, name, fitdlgfunc):\n \"\"\"Compute fitting curve\"\"\"\n for obj in self.panel.objview.get_sel_objects():\n self.__row_compute_fit(obj, name, fitdlgfunc)\n\n @qt_try_except()\n def compute_polyfit(\n self, param: 
cdl.param.PolynomialFitParam | None = None\n ) -> None:\n \"\"\"Compute polynomial fitting curve\"\"\"\n txt = _(\"Polynomial fit\")\n edit, param = self.init_param(param, cps.PolynomialFitParam, txt)\n if not edit or param.edit(self.panel.parent()):\n dlgfunc = fitdialog.polynomialfit\n self.compute_fit(\n txt,\n lambda x, y, degree=param.degree, parent=self.panel.parent(): dlgfunc(\n x, y, degree, parent=parent\n ),\n )\n\n def __row_compute_fit(\n self, obj: SignalObj, name: str, fitdlgfunc: Callable\n ) -> None:\n \"\"\"Curve fitting computing sub-method\"\"\"\n output = fitdlgfunc(obj.x, obj.y, parent=self.panel.parent())\n if output is not None:\n y, params = output\n results = {}\n for param in params:\n if re.match(r\"[\\S\\_]*\\d{2}$\", param.name):\n shname = param.name[:-2]\n value = results.get(shname, np.array([]))\n results[shname] = np.array(list(value) + [param.value])\n else:\n results[param.name] = param.value\n # Creating new signal\n signal = create_signal(f\"{name}({obj.title})\", obj.x, y, metadata=results)\n # Creating new plot item\n self.panel.add_object(signal)\n\n @qt_try_except()\n def compute_multigaussianfit(self) -> None:\n \"\"\"Compute multi-Gaussian fitting curve\"\"\"\n fitdlgfunc = fitdialog.multigaussianfit\n for obj in self.panel.objview.get_sel_objects():\n dlg = signalpeakdialog.SignalPeakDetectionDialog(self.panel)\n dlg.setup_data(obj.x, obj.y)\n if exec_dialog(dlg):\n # Computing x, y\n peaks = dlg.get_peak_indexes()\n self.__row_compute_fit(\n obj,\n _(\"Multi-Gaussian fit\"),\n lambda x, y, peaks=peaks, parent=self.panel.parent(): fitdlgfunc(\n x, y, peaks, parent=parent\n ),\n )\n\n # ------Signal Computing\n @qt_try_except()\n def compute_fwhm(self, param: cdl.param.FWHMParam | None = None) -> None:\n \"\"\"Compute FWHM\"\"\"\n self.compute_10(\n cps.compute_fwhm, ShapeTypes.SEGMENT, param, cps.FWHMParam, title=_(\"FWHM\")\n )\n\n @qt_try_except()\n def compute_fw1e2(self) -> None:\n \"\"\"Compute FW at 1/e²\"\"\"\n self.compute_10(cps.compute_fw1e2, ShapeTypes.SEGMENT, title=_(\"FW\") + \"1/e²\")\n\n def _get_stat_funcs(self) -> list[tuple[str, Callable[[np.ndarray], float]]]:\n \"\"\"Return statistics functions list\"\"\"\n return [\n (\"min(y)\", lambda xy: xy[1].min()),\n (\"max(y)\", lambda xy: xy[1].max()),\n (\"<y>\", lambda xy: xy[1].mean()),\n (\"Median(y)\", lambda xy: np.median(xy[1])),\n (\"σ(y)\", lambda xy: xy[1].std()),\n (\"Σ(y)\", lambda xy: xy[1].sum()),\n (\"∫ydx\", lambda xy: np.trapz(xy[1], xy[0])),\n ]" }, { "identifier": "SignalIORegistry", "path": "cdl/core/io/signal/base.py", "snippet": "class SignalIORegistry(BaseIORegistry):\n \"\"\"Metaclass for registering signal I/O handler classes\"\"\"\n\n _io_format_instances: list[SignalFormatBase] = []" }, { "identifier": "SignalObj", "path": "cdl/core/model/signal.py", "snippet": "class SignalObj(gds.DataSet, base.BaseObj):\n \"\"\"Signal object\"\"\"\n\n PREFIX = \"s\"\n CONF_FMT = Conf.view.sig_format\n DEFAULT_FMT = \"g\"\n VALID_DTYPES = (np.float32, np.float64, np.complex128)\n\n uuid = gds.StringItem(\"UUID\").set_prop(\"display\", hide=True)\n\n _tabs = gds.BeginTabGroup(\"all\")\n\n _datag = gds.BeginGroup(_(\"Data and metadata\"))\n title = gds.StringItem(_(\"Signal title\"), default=_(\"Untitled\"))\n xydata = gds.FloatArrayItem(_(\"Data\"), transpose=True, minmax=\"rows\")\n metadata = gds.DictItem(_(\"Metadata\"), default={})\n _e_datag = gds.EndGroup(_(\"Data and metadata\"))\n\n _unitsg = gds.BeginGroup(_(\"Titles and units\"))\n title = 
gds.StringItem(_(\"Signal title\"), default=_(\"Untitled\"))\n _tabs_u = gds.BeginTabGroup(\"units\")\n _unitsx = gds.BeginGroup(_(\"X-axis\"))\n xlabel = gds.StringItem(_(\"Title\"), default=\"\")\n xunit = gds.StringItem(_(\"Unit\"), default=\"\")\n _e_unitsx = gds.EndGroup(_(\"X-axis\"))\n _unitsy = gds.BeginGroup(_(\"Y-axis\"))\n ylabel = gds.StringItem(_(\"Title\"), default=\"\")\n yunit = gds.StringItem(_(\"Unit\"), default=\"\")\n _e_unitsy = gds.EndGroup(_(\"Y-axis\"))\n _e_tabs_u = gds.EndTabGroup(\"units\")\n _e_unitsg = gds.EndGroup(_(\"Titles and units\"))\n\n _e_tabs = gds.EndTabGroup(\"all\")\n\n def __init__(self, title=None, comment=None, icon=\"\"):\n \"\"\"Constructor\n\n Args:\n title (str): title\n comment (str): comment\n icon (str): icon\n \"\"\"\n gds.DataSet.__init__(self, title, comment, icon)\n base.BaseObj.__init__(self)\n self.regenerate_uuid()\n\n def regenerate_uuid(self):\n \"\"\"Regenerate UUID\n\n This method is used to regenerate UUID after loading the object from a file.\n This is required to avoid UUID conflicts when loading objects from file\n without clearing the workspace first.\n \"\"\"\n self.uuid = str(uuid4())\n\n def copy(\n self, title: str | None = None, dtype: np.dtype | None = None\n ) -> SignalObj:\n \"\"\"Copy object.\n\n Args:\n title (str): title\n dtype (numpy.dtype): data type\n\n Returns:\n SignalObj: copied object\n \"\"\"\n title = self.title if title is None else title\n obj = SignalObj(title=title)\n obj.title = title\n if dtype not in (None, float, complex, np.complex128):\n raise RuntimeError(\"Signal data only supports float64/complex128 dtype\")\n obj.metadata = deepcopy(self.metadata)\n obj.xydata = np.array(self.xydata, copy=True, dtype=dtype)\n return obj\n\n def set_data_type(self, dtype: np.dtype) -> None: # pylint: disable=unused-argument\n \"\"\"Change data type.\n\n Args:\n dtype (numpy.dtype): data type\n \"\"\"\n raise RuntimeError(\"Setting data type is not support for signals\")\n\n def set_xydata(\n self,\n x: np.ndarray | list,\n y: np.ndarray | list,\n dx: np.ndarray | list | None = None,\n dy: np.ndarray | list | None = None,\n ) -> None:\n \"\"\"Set xy data\n\n Args:\n x (numpy.ndarray): x data\n y (numpy.ndarray): y data\n dx (numpy.ndarray): dx data (optional: error bars)\n dy (numpy.ndarray): dy data (optional: error bars)\n \"\"\"\n if x is not None:\n x = np.array(x)\n if y is not None:\n y = np.array(y)\n if dx is not None:\n dx = np.array(dx)\n if dy is not None:\n dy = np.array(dy)\n if dx is None and dy is None:\n self.xydata = np.vstack([x, y])\n else:\n if dx is None:\n dx = np.zeros_like(dy)\n if dy is None:\n dy = np.zeros_like(dx)\n self.xydata = np.vstack((x, y, dx, dy))\n\n def __get_x(self) -> np.ndarray | None:\n \"\"\"Get x data\"\"\"\n if self.xydata is not None:\n return self.xydata[0]\n return None\n\n def __set_x(self, data) -> None:\n \"\"\"Set x data\"\"\"\n self.xydata[0] = np.array(data)\n\n def __get_y(self) -> np.ndarray | None:\n \"\"\"Get y data\"\"\"\n if self.xydata is not None:\n return self.xydata[1]\n return None\n\n def __set_y(self, data) -> None:\n \"\"\"Set y data\"\"\"\n self.xydata[1] = np.array(data)\n\n def __get_dx(self) -> np.ndarray | None:\n \"\"\"Get dx data\"\"\"\n if self.xydata is not None and len(self.xydata) > 2:\n return self.xydata[2]\n return None\n\n def __set_dx(self, data) -> None:\n \"\"\"Set dx data\"\"\"\n if self.xydata is not None and len(self.xydata) > 2:\n self.xydata[2] = np.array(data)\n else:\n raise ValueError(\"dx data not available\")\n\n 
def __get_dy(self) -> np.ndarray | None:\n \"\"\"Get dy data\"\"\"\n if self.xydata is not None and len(self.xydata) > 3:\n return self.xydata[3]\n return None\n\n def __set_dy(self, data) -> None:\n \"\"\"Set dy data\"\"\"\n if self.xydata is not None and len(self.xydata) > 3:\n self.xydata[3] = np.array(data)\n else:\n raise ValueError(\"dy data not available\")\n\n x = property(__get_x, __set_x)\n y = data = property(__get_y, __set_y)\n dx = property(__get_dx, __set_dx)\n dy = property(__get_dy, __set_dy)\n\n def get_data(self, roi_index: int | None = None) -> np.ndarray:\n \"\"\"\n Return original data (if ROI is not defined or `roi_index` is None),\n or ROI data (if both ROI and `roi_index` are defined).\n\n Args:\n roi_index (int): ROI index\n\n Returns:\n numpy.ndarray: data\n \"\"\"\n if self.roi is None or roi_index is None:\n return self.x, self.y\n i1, i2 = self.roi[roi_index, :]\n return self.x[i1:i2], self.y[i1:i2]\n\n def update_plot_item_parameters(self, item: CurveItem) -> None:\n \"\"\"Update plot item parameters from object data/metadata\n\n Takes into account a subset of plot item parameters. Those parameters may\n have been overriden by object metadata entries or other object data. The goal\n is to update the plot item accordingly.\n\n This is *almost* the inverse operation of `update_metadata_from_plot_item`.\n\n Args:\n item: plot item\n \"\"\"\n update_dataset(item.param.line, self.metadata)\n update_dataset(item.param.symbol, self.metadata)\n super().update_plot_item_parameters(item)\n\n def update_metadata_from_plot_item(self, item: CurveItem) -> None:\n \"\"\"Update metadata from plot item.\n\n Takes into account a subset of plot item parameters. Those parameters may\n have been modified by the user through the plot item GUI. 
The goal is to\n update the metadata accordingly.\n\n This is *almost* the inverse operation of `update_plot_item_parameters`.\n\n Args:\n item: plot item\n \"\"\"\n super().update_metadata_from_plot_item(item)\n restore_dataset(item.param.line, self.metadata)\n restore_dataset(item.param.symbol, self.metadata)\n\n def make_item(self, update_from: CurveItem = None) -> CurveItem:\n \"\"\"Make plot item from data.\n\n Args:\n update_from (CurveItem): plot item to update from\n\n Returns:\n CurveItem: plot item\n \"\"\"\n if len(self.xydata) in (2, 3, 4):\n if len(self.xydata) == 2: # x, y signal\n x, y = self.xydata\n item = make.mcurve(x.real, y.real, label=self.title)\n elif len(self.xydata) == 3: # x, y, dy error bar signal\n x, y, dy = self.xydata\n item = make.merror(x.real, y.real, dy.real, label=self.title)\n elif len(self.xydata) == 4: # x, y, dx, dy error bar signal\n x, y, dx, dy = self.xydata\n item = make.merror(x.real, y.real, dx.real, dy.real, label=self.title)\n CurveStyles.apply_style(item.param)\n else:\n raise RuntimeError(\"data not supported\")\n if update_from is None:\n if execenv.demo_mode:\n item.param.line.width = 3\n self.update_plot_item_parameters(item)\n else:\n update_dataset(item.param, update_from.param)\n item.update_params()\n return item\n\n def update_item(self, item: CurveItem, data_changed: bool = True) -> None:\n \"\"\"Update plot item from data.\n\n Args:\n item (CurveItem): plot item\n data_changed (bool): if True, data has changed\n \"\"\"\n if data_changed:\n if len(self.xydata) == 2: # x, y signal\n x, y = self.xydata\n item.set_data(x.real, y.real)\n elif len(self.xydata) == 3: # x, y, dy error bar signal\n x, y, dy = self.xydata\n item.set_data(x.real, y.real, dy=dy.real)\n elif len(self.xydata) == 4: # x, y, dx, dy error bar signal\n x, y, dx, dy = self.xydata\n item.set_data(x.real, y.real, dx.real, dy.real)\n item.param.label = self.title\n self.update_plot_item_parameters(item)\n\n def roi_coords_to_indexes(self, coords: list) -> np.ndarray:\n \"\"\"Convert ROI coordinates to indexes.\n\n Args:\n coords (list): coordinates\n\n Returns:\n numpy.ndarray: indexes\n \"\"\"\n indexes = np.array(coords, int)\n for row in range(indexes.shape[0]):\n for col in range(indexes.shape[1]):\n x0 = coords[row][col]\n indexes[row, col] = np.abs(self.x - x0).argmin()\n return indexes\n\n def get_roi_param(self, title: str, *defaults) -> gds.DataSet:\n \"\"\"Return ROI parameters dataset.\n\n Args:\n title (str): title\n *defaults: default values\n \"\"\"\n imax = len(self.x) - 1\n i0, i1 = defaults\n param = ROIParam(title)\n param.col1 = i0\n param.col2 = i1\n param.set_global_prop(\"data\", min=-1, max=imax)\n return param\n\n @staticmethod\n def params_to_roidata(params: gds.DataSetGroup) -> np.ndarray:\n \"\"\"Convert ROI dataset group to ROI array data.\n\n Args:\n params (DataSetGroup): ROI dataset group\n\n Returns:\n numpy.ndarray: ROI array data\n \"\"\"\n roilist = []\n for roiparam in params.datasets:\n roiparam: ROIParam\n roilist.append([roiparam.col1, roiparam.col2])\n if len(roilist) == 0:\n return None\n return np.array(roilist, int)\n\n def new_roi_item(self, fmt: str, lbl: bool, editable: bool):\n \"\"\"Return a new ROI item from scratch\n\n Args:\n fmt (str): format string\n lbl (bool): if True, add label\n editable (bool): if True, ROI is editable\n \"\"\"\n coords = self.x.min(), self.x.max()\n return base.make_roi_item(\n lambda x, y, _title: make.range(x, y),\n coords,\n \"ROI\",\n fmt,\n lbl,\n editable,\n option=\"shape/drag\",\n 
)\n\n def iterate_roi_items(self, fmt: str, lbl: bool, editable: bool = True):\n \"\"\"Make plot item representing a Region of Interest.\n\n Args:\n fmt (str): format string\n lbl (bool): if True, add label\n editable (bool): if True, ROI is editable\n\n Yields:\n PlotItem: plot item\n \"\"\"\n if self.roi is not None:\n for index, coords in enumerate(self.x[self.roi]):\n yield base.make_roi_item(\n lambda x, y, _title: make.range(x, y),\n coords,\n f\"ROI{index:02d}\",\n fmt,\n lbl,\n editable,\n option=\"shape/drag\",\n )\n\n def add_label_with_title(self, title: str | None = None) -> None:\n \"\"\"Add label with title annotation\n\n Args:\n title (str): title (if None, use signal title)\n \"\"\"\n title = self.title if title is None else title\n if title:\n label = make.label(title, \"TL\", (0, 0), \"TL\")\n self.add_annotations_from_items([label])" }, { "identifier": "create_signal_from_param", "path": "cdl/core/model/signal.py", "snippet": "def create_signal_from_param(\n newparam: NewSignalParam,\n addparam: gds.DataSet | None = None,\n edit: bool = False,\n parent: QW.QWidget | None = None,\n) -> SignalObj | None:\n \"\"\"Create a new Signal object from a dialog box.\n\n Args:\n newparam (NewSignalParam): new signal parameters\n addparam (guidata.dataset.DataSet): additional parameters\n edit (bool): Open a dialog box to edit parameters (default: False)\n parent (QWidget): parent widget\n\n Returns:\n SignalObj: signal object or None if canceled\n \"\"\"\n global SIG_NB # pylint: disable=global-statement\n if newparam is None:\n newparam = new_signal_param()\n incr_sig_nb = not newparam.title\n if incr_sig_nb:\n newparam.title = f\"{newparam.title} {SIG_NB + 1:d}\"\n if not edit or addparam is not None or newparam.edit(parent=parent):\n prefix = newparam.stype.name.lower()\n if incr_sig_nb:\n SIG_NB += 1\n signal = create_signal(newparam.title)\n xarr = np.linspace(newparam.xmin, newparam.xmax, newparam.size)\n p = addparam\n if newparam.stype == SignalTypes.ZEROS:\n signal.set_xydata(xarr, np.zeros(newparam.size))\n elif newparam.stype in (SignalTypes.UNIFORMRANDOM, SignalTypes.NORMALRANDOM):\n pclass = {\n SignalTypes.UNIFORMRANDOM: base.UniformRandomParam,\n SignalTypes.NORMALRANDOM: base.NormalRandomParam,\n }[newparam.stype]\n if p is None:\n p = pclass(_(\"Signal\") + \" - \" + prefix)\n if edit and not p.edit(parent=parent):\n return None\n rng = np.random.default_rng(p.seed)\n if newparam.stype == SignalTypes.UNIFORMRANDOM:\n yarr = rng.random((newparam.size,)) * (p.vmax - p.vmin) + p.vmin\n if signal.title == DEFAULT_TITLE:\n signal.title = f\"{prefix}(vmin={p.vmin:.3g},vmax={p.vmax:.3g})\"\n elif newparam.stype == SignalTypes.NORMALRANDOM:\n yarr = rng.normal(p.mu, p.sigma, size=(newparam.size,))\n if signal.title == DEFAULT_TITLE:\n signal.title = f\"{prefix}(mu={p.mu:.3g},sigma={p.sigma:.3g})\"\n else:\n raise NotImplementedError(f\"New param type: {prefix}\")\n signal.set_xydata(xarr, yarr)\n elif newparam.stype in (\n SignalTypes.GAUSS,\n SignalTypes.LORENTZ,\n SignalTypes.VOIGT,\n ):\n func, title = {\n SignalTypes.GAUSS: (fit.GaussianModel.func, _(\"Gaussian\")),\n SignalTypes.LORENTZ: (fit.LorentzianModel.func, _(\"Lorentzian\")),\n SignalTypes.VOIGT: (fit.VoigtModel.func, \"Voigt\"),\n }[newparam.stype]\n if p is None:\n p = GaussLorentzVoigtParam(title)\n if edit and not p.edit(parent=parent):\n return None\n yarr = func(xarr, p.a, p.sigma, p.mu, p.ymin)\n signal.set_xydata(xarr, yarr)\n if signal.title == DEFAULT_TITLE:\n signal.title = (\n 
f\"{prefix}(a={p.a:.3g},sigma={p.sigma:.3g},\"\n f\"mu={p.mu:.3g},ymin={p.ymin:.3g})\"\n )\n elif newparam.stype in (\n SignalTypes.SINUS,\n SignalTypes.COSINUS,\n SignalTypes.SAWTOOTH,\n SignalTypes.TRIANGLE,\n SignalTypes.SQUARE,\n SignalTypes.SINC,\n ):\n func, title = {\n SignalTypes.SINUS: (np.sin, _(\"Sinusoid\")),\n SignalTypes.COSINUS: (np.cos, _(\"Sinusoid\")),\n SignalTypes.SAWTOOTH: (sps.sawtooth, _(\"Sawtooth function\")),\n SignalTypes.TRIANGLE: (triangle_func, _(\"Triangle function\")),\n SignalTypes.SQUARE: (sps.square, _(\"Square function\")),\n SignalTypes.SINC: (np.sinc, _(\"Cardinal sine\")),\n }[newparam.stype]\n if p is None:\n p = PeriodicParam(title)\n if edit and not p.edit(parent=parent):\n return None\n freq = p.get_frequency_in_hz()\n yarr = p.a * func(2 * np.pi * freq * xarr + np.deg2rad(p.phase)) + p.ymin\n signal.set_xydata(xarr, yarr)\n if signal.title == DEFAULT_TITLE:\n signal.title = (\n f\"{prefix}(f={p.freq:.3g} {p.freq_unit.value}),\"\n f\"a={p.a:.3g},ymin={p.ymin:.3g},phase={p.phase:.3g}°)\"\n )\n elif newparam.stype == SignalTypes.STEP:\n if p is None:\n p = StepParam(_(\"Step function\"))\n if edit and not p.edit(parent=parent):\n return None\n yarr = np.ones_like(xarr) * p.a1\n yarr[xarr > p.x0] = p.a2\n signal.set_xydata(xarr, yarr)\n if signal.title == DEFAULT_TITLE:\n signal.title = f\"{prefix}(x0={p.x0:.3g},a1={p.a1:.3g},a2={p.a2:.3g})\"\n return signal\n return None" }, { "identifier": "new_signal_param", "path": "cdl/core/model/signal.py", "snippet": "def new_signal_param(\n title: str | None = None,\n stype: str | None = None,\n xmin: float | None = None,\n xmax: float | None = None,\n size: int | None = None,\n) -> NewSignalParam:\n \"\"\"Create a new Signal dataset instance.\n\n Args:\n title (str): dataset title (default: None, uses default title)\n stype (str): signal type (default: None, uses default type)\n xmin (float): X min (default: None, uses default value)\n xmax (float): X max (default: None, uses default value)\n size (int): signal size (default: None, uses default value)\n\n Returns:\n NewSignalParam: new signal dataset instance\n \"\"\"\n title = DEFAULT_TITLE if title is None else title\n param = NewSignalParam(title=title, icon=get_icon(\"new_signal.svg\"))\n param.title = title\n if xmin is not None:\n param.xmin = xmin\n if xmax is not None:\n param.xmax = xmax\n if size is not None:\n param.size = size\n if stype is not None:\n param.stype = stype\n return param" } ]
from typing import TYPE_CHECKING

from plotpy.tools import (
    HCursorTool,
    HRangeTool,
    LabelTool,
    RectangleTool,
    SegmentTool,
    VCursorTool,
    XCursorTool,
)

from cdl.config import _
from cdl.core.gui import roieditor
from cdl.core.gui.actionhandler import SignalActionHandler
from cdl.core.gui.panel.base import BaseDataPanel
from cdl.core.gui.plothandler import SignalPlotHandler
from cdl.core.gui.processor.signal import SignalProcessor
from cdl.core.io.signal import SignalIORegistry
from cdl.core.model.signal import SignalObj, create_signal_from_param, new_signal_param
from plotpy.plot import PlotWidget
from qtpy import QtWidgets as QW
from cdl.core.model.signal import NewSignalParam
import guidata.dataset as gds
19,618
# -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause
# (see cdl/LICENSE for details)

"""DataLab Signal Panel"""

# pylint: disable=invalid-name  # Allows short reference names like x, y, ...

from __future__ import annotations

if TYPE_CHECKING:  # pragma: no cover

class SignalPanel(BaseDataPanel):
    """Object handling the item list, the selected item properties and plot,
    specialized for Signal objects"""

    PANEL_STR = _("Signal panel")
    PARAMCLASS = SignalObj
    ANNOTATION_TOOLS = (
        LabelTool,
        VCursorTool,
        HCursorTool,
        XCursorTool,
        SegmentTool,
        RectangleTool,
        HRangeTool,
    )
    IO_REGISTRY = SignalIORegistry
    H5_PREFIX = "DataLab_Sig"
    ROIDIALOGCLASS = roieditor.SignalROIEditor

    # pylint: disable=duplicate-code

    def __init__(self, parent: QW.QWidget, plotwidget: PlotWidget, toolbar) -> None:
        super().__init__(parent, plotwidget, toolbar)
        self.plothandler = SignalPlotHandler(self, plotwidget)
        self.processor = SignalProcessor(self, plotwidget)
        self.acthandler = SignalActionHandler(self, toolbar)

    # ------Creating, adding, removing objects------------------------------------------
    def get_newparam_from_current(
        self, newparam: NewSignalParam | None = None
    ) -> NewSignalParam | None:
        """Get new object parameters from the current object.

        Args:
            newparam (guidata.dataset.DataSet): new object parameters.
                If None, create a new one.

        Returns:
            New object parameters
        """
        curobj: SignalObj = self.objview.get_current_object()
# -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause
# (see cdl/LICENSE for details)

"""DataLab Signal Panel"""

# pylint: disable=invalid-name  # Allows short reference names like x, y, ...

from __future__ import annotations

if TYPE_CHECKING:  # pragma: no cover

class SignalPanel(BaseDataPanel):
    """Object handling the item list, the selected item properties and plot,
    specialized for Signal objects"""

    PANEL_STR = _("Signal panel")
    PARAMCLASS = SignalObj
    ANNOTATION_TOOLS = (
        LabelTool,
        VCursorTool,
        HCursorTool,
        XCursorTool,
        SegmentTool,
        RectangleTool,
        HRangeTool,
    )
    IO_REGISTRY = SignalIORegistry
    H5_PREFIX = "DataLab_Sig"
    ROIDIALOGCLASS = roieditor.SignalROIEditor

    # pylint: disable=duplicate-code

    def __init__(self, parent: QW.QWidget, plotwidget: PlotWidget, toolbar) -> None:
        super().__init__(parent, plotwidget, toolbar)
        self.plothandler = SignalPlotHandler(self, plotwidget)
        self.processor = SignalProcessor(self, plotwidget)
        self.acthandler = SignalActionHandler(self, toolbar)

    # ------Creating, adding, removing objects------------------------------------------
    def get_newparam_from_current(
        self, newparam: NewSignalParam | None = None
    ) -> NewSignalParam | None:
        """Get new object parameters from the current object.

        Args:
            newparam (guidata.dataset.DataSet): new object parameters.
                If None, create a new one.

        Returns:
            New object parameters
        """
        curobj: SignalObj = self.objview.get_current_object()
newparam = new_signal_param() if newparam is None else newparam
9
2023-11-09 16:56:03+00:00
24k
ingra14m/Tensor4D-DNeRF
exp_runner.py
[ { "identifier": "Dataset", "path": "models/dataset.py", "snippet": "class Dataset:\n def __init__(self, conf):\n super(Dataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.data_dir = conf.get_string('data_dir')\n self.render_cameras_name = conf.get_string('render_cameras_name')\n self.object_cameras_name = conf.get_string('object_cameras_name')\n\n self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True)\n self.scale_mat_scale = conf.get_float('scale_mat_scale', default=1.1)\n self.near = conf.get_float('near', default=-1)\n self.far = conf.get_float('far', default=-1)\n self.n_frames = conf.get_int('n_frames', default=128)\n\n camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name))\n self.camera_dict = camera_dict\n self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png')))\n self.n_images = len(self.images_lis)\n self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0\n self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png')))\n self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0\n\n # world_mat is a projection matrix from world to image\n self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n self.fid_list = [torch.LongTensor(np.array([camera_dict['fid_%d' % idx]])) for idx in range(self.n_images)]\n self.scale_mats_np = []\n\n # scale_mat: used for coordinate normalization, we assume the scene to render is inside a unit sphere at origin.\n self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n\n self.intrinsics_all = []\n self.pose_all = []\n self.proj_all = []\n\n for scale_mat, world_mat in zip(self.scale_mats_np, self.world_mats_np):\n P = world_mat @ scale_mat\n P = P[:3, :4]\n intrinsics, pose = load_K_Rt_from_P(None, P)\n self.intrinsics_all.append(torch.from_numpy(intrinsics).float())\n self.pose_all.append(torch.from_numpy(pose).float())\n self.proj_all.append(torch.from_numpy(P).float())\n\n self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.errors = self.masks[:, :, :, :1].clone()\n self.errors = F.interpolate(self.errors.permute(0, 3, 1, 2), (self.images.shape[1] // 8, self.images.shape[2] // 8), mode='bilinear')\n self.errors = F.max_pool2d(self.errors, 7, stride=1, padding=3)\n self.errors = self.errors.permute(0, 2, 3, 1)\n self.radius = torch.zeros(self.masks.shape[0], self.masks.shape[2], self.masks.shape[1], 1) # [n_images, W, H, 3]\n \n self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device) # [n_images, 4, 4]\n self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4]\n self.focal = self.intrinsics_all[0][0, 0]\n self.pose_all = torch.stack(self.pose_all).to(self.device) # [n_images, 4, 4]\n self.proj_all = torch.stack(self.proj_all).to(self.device)\n self.H, self.W = self.images.shape[1], self.images.shape[2]\n self.image_pixels = self.H * self.W\n self.fid_all = torch.stack(self.fid_list).to(self.device)\n self.time_emb_list = (self.fid_all / self.n_frames * 2) - 0.95\n\n object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])\n object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0])\n # Object scale mat: region of interest to **extract mesh**\n object_scale_mat = 
np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0']\n object_bbox_min = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_min[:, None]\n object_bbox_max = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_max[:, None]\n self.object_bbox_min = object_bbox_min[:3, 0]\n self.object_bbox_max = object_bbox_max[:3, 0]\n self.process_radius()\n\n print('Load data: End')\n\n def process_radius(self):\n for img_idx in tqdm(range(self.images.shape[0])):\n tx = torch.linspace(0, self.W - 1, self.W, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n # Cut the distance in half, and then round it out so that it's\n # halfway between inscribed by / circumscribed about the pixel.\n radii = dx[..., None] * 2 / np.sqrt(12)\n self.radius[img_idx] = radii.detach().cpu() # W H 3\n\n def gen_rays_at(self, img_idx, resolution_level=1):\n \"\"\"\n Generate rays at world space from one camera.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n rays_r = dx[..., None] * 2 / np.sqrt(12)\n rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3\n rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n def gen_random_rays_at(self, img_idx, batch_size):\n \"\"\"\n Generate random rays at world space from one camera.\n \"\"\"\n error = self.errors[img_idx].reshape(-1).numpy()\n max_error = np.max(error) + 1e-8\n error = error / max_error\n error[error < 0.1] = 0.1\n error = error / np.sum(error)\n index = np.arange(0, self.W*self.H // 64)\n select_index = np.random.choice(index, size=[batch_size], p=error)\n pixels_y = torch.LongTensor(select_index // (self.W // 8)) * 8\n pixels_y += torch.randint_like(pixels_y, 8)\n pixels_x = torch.LongTensor(select_index % (self.W // 8)) * 8\n pixels_x += torch.randint_like(pixels_x, 8)\n\n color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n rays_r = self.radius[img_idx][(pixels_x, pixels_y)] # batch_size, 1\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float().to(self.device) # batch_size, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # 
batch_size, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3\n rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3\n return torch.cat([rays_o.cpu(), rays_v.cpu(), color.cpu(), mask[:, :1].cpu(), rays_r.cpu()], dim=-1).cuda(), pixels_y.cpu(), pixels_x.cpu() # batch_size, 10\n\n def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):\n \"\"\"\n Interpolate pose between two cameras.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio\n pose_0 = self.pose_all[idx_0].detach().cpu().numpy()\n pose_1 = self.pose_all[idx_1].detach().cpu().numpy()\n pose_0 = np.linalg.inv(pose_0)\n pose_1 = np.linalg.inv(pose_1)\n rot_0 = pose_0[:3, :3]\n rot_1 = pose_1[:3, :3]\n rots = Rot.from_matrix(np.stack([rot_0, rot_1]))\n key_times = [0, 1]\n slerp = Slerp(key_times, rots)\n rot = slerp(ratio)\n pose = np.diag([1.0, 1.0, 1.0, 1.0])\n pose = pose.astype(np.float32)\n pose[:3, :3] = rot.as_matrix()\n pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]\n pose = np.linalg.inv(pose)\n rot = torch.from_numpy(pose[:3, :3]).cuda()\n trans = torch.from_numpy(pose[:3, 3]).cuda()\n rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n rays_r = dx[..., None] * 2 / np.sqrt(12)\n rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True) # W, H, 3\n rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n def near_far_from_sphere(self, rays_o, rays_d):\n if self.near > 0:\n return self.near, self.far\n a = torch.sum(rays_d**2, dim=-1, keepdim=True)\n b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)\n mid = 0.5 * (-b) / a\n near = mid - 1.0\n far = mid + 1.0\n return near, far\n\n def image_at(self, idx, resolution_level):\n img = cv.imread(self.images_lis[idx])\n return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)" }, { "identifier": "BlenderDataset", "path": "models/dataset.py", "snippet": "class BlenderDataset:\n def __init__(self, conf):\n super(BlenderDataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.near = conf.get_float('near', default=-1)\n self.far = conf.get_float('far', default=-1)\n self.n_frames = conf.get_int('n_frames', default=128)\n\n self.data_dir = conf.get_string('data_dir')\n splits = ['train']\n metas = {}\n for s in splits:\n with open(os.path.join(self.data_dir, 'transforms_{}.json'.format(s)), 'r') as fp:\n metas[s] = json.load(fp)\n self.images_lis = sorted(glob(os.path.join(self.data_dir, 'train/*.png')), key=lambda x: int(x.split('.')[0].split('_')[-1]))\n # if self.data_dir.split('/')[-2] == 'lego':\n # # self.images_lis = self.images_lis[1:]\n # self.images_lis.append('/data00/yzy/Git_Project/data/dynamic/D-NeRF/lego/val/r_0.png')\n all_imgs = []\n 
all_poses = []\n all_masks = []\n all_times = []\n counts = [0]\n for s in splits:\n meta = metas[s]\n\n imgs = []\n poses = []\n times = []\n\n for t, frame in enumerate(meta['frames']):\n fname = os.path.join(self.data_dir, frame['file_path'] + '.png')\n image = cv.imread(fname, cv.IMREAD_UNCHANGED)\n imgs.append(image)\n pose = np.array(frame['transform_matrix'])\n time = np.array([frame['time']])\n\n a = pose[:, 0:1]\n b = pose[:, 1:2]\n c = pose[:, 2:3]\n d = pose[:, 3:].copy()\n d[:3, :] *= 0.8\n\n pose = np.concatenate([a, -b, -c, d], 1)\n\n poses.append(pose)\n times.append(time)\n\n imgs = (np.array(imgs) / 255.).astype(np.float32) # keep all 4 channels (RGBA)\n poses = np.array(poses).astype(np.float32)\n times = np.array(times).astype(np.float32)\n masks = (imgs[..., 3:] > 0).astype(np.float32)\n imgs = imgs[..., :3]\n counts.append(counts[-1] + imgs.shape[0])\n all_imgs.append(imgs)\n all_poses.append(poses)\n all_masks.append(masks)\n all_times.append(times)\n\n self.images = torch.from_numpy(np.concatenate(all_imgs, 0)).cpu()\n self.masks = torch.from_numpy(np.concatenate(all_masks, 0)).cpu()\n self.radius = torch.zeros(self.masks.shape[0], self.masks.shape[2], self.masks.shape[1], 1) # no use\n self.errors = self.masks[:, :, :, :1].clone()\n self.errors = F.interpolate(self.errors.permute(0, 3, 1, 2),\n (self.images.shape[1] // 8, self.images.shape[2] // 8), mode='bilinear')\n self.errors = F.max_pool2d(self.errors, 7, stride=1, padding=3)\n self.errors = self.errors.permute(0, 2, 3, 1)\n self.n_images = self.images.shape[0]\n\n self.fid_list = [torch.LongTensor(np.array([idx])) for idx in range(self.n_images)]\n # if self.data_dir.split('/')[-2] == 'lego':\n # self.fid_list[-1] = torch.LongTensor(np.array([0]))\n self.pose_all = torch.from_numpy(np.concatenate(all_poses, 0)).to(self.device)\n self.fid_all = torch.stack(self.fid_list).to(self.device)\n self.time_emb_list = torch.from_numpy(np.concatenate(all_times, 0)).to(self.device)\n\n self.H, self.W = self.images[0].shape[:2]\n self.image_pixels = self.H * self.W\n\n camera_angle_x = float(meta['camera_angle_x'])\n self.focal = .5 * self.W / np.tan(.5 * camera_angle_x)\n intrinsics = torch.Tensor(\n [[self.focal, 0, 0.5 * self.W, 0],\n [0, self.focal, 0.5 * self.H, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]]).to(self.device)\n self.intrinsics_all = intrinsics.expand(self.n_images, -1, -1)\n self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4]\n self.object_bbox_min = np.array([-1.01, -1.01, -1.01]) # hard code bbox\n self.object_bbox_max = np.array([1.01, 1.01, 1.01])\n self.process_radius()\n\n print('Load data: End')\n\n def process_radius(self):\n for img_idx in tqdm(range(self.images.shape[0])):\n tx = torch.linspace(0, self.W - 1, self.W, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n # Cut the distance in half, and then round it out so that it's\n # halfway between inscribed by / circumscribed about the pixel.\n radii = dx[..., None] * 2 / np.sqrt(12)\n self.radius[img_idx] = 
radii.detach().cpu() # W H 3\n\n def gen_rays_at(self, img_idx, resolution_level=1):\n \"\"\"\n Generate rays at world space from one camera.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3],\n p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n rays_r = dx[..., None] * 2 / np.sqrt(12)\n rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3\n rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n def gen_random_rays_at(self, img_idx, batch_size):\n \"\"\"\n Generate random rays at world space from one camera.\n \"\"\"\n error = self.errors[img_idx].reshape(-1).numpy()\n max_error = np.max(error) + 1e-8\n error = error / max_error\n error[error < 0.1] = 0.1\n error = error / np.sum(error)\n index = np.arange(0, self.W * self.H // 64)\n select_index = np.random.choice(index, size=[batch_size], p=error)\n pixels_y = torch.LongTensor(select_index // (self.W // 8)) * 8\n pixels_y += torch.randint_like(pixels_y, 8)\n pixels_x = torch.LongTensor(select_index % (self.W // 8)) * 8\n pixels_x += torch.randint_like(pixels_x, 8)\n\n color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n rays_r = self.radius[img_idx][(pixels_x, pixels_y)] # batch_size, 1\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float().to(\n self.device) # batch_size, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # batch_size, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3\n rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3\n return torch.cat([rays_o.cpu(), rays_v.cpu(), color.cpu(), mask[:, :1].cpu(), rays_r.cpu()],\n dim=-1).cuda(), pixels_y.cpu(), pixels_x.cpu() # batch_size, 10\n\n def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):\n \"\"\"\n Interpolate pose between two cameras.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio\n pose_0 = self.pose_all[idx_0].detach().cpu().numpy()\n pose_1 = self.pose_all[idx_1].detach().cpu().numpy()\n pose_0 = np.linalg.inv(pose_0)\n pose_1 = np.linalg.inv(pose_1)\n rot_0 = pose_0[:3, :3]\n rot_1 = pose_1[:3, :3]\n rots = Rot.from_matrix(np.stack([rot_0, rot_1]))\n key_times = [0, 1]\n slerp = Slerp(key_times, 
rots)\n rot = slerp(ratio)\n pose = np.diag([1.0, 1.0, 1.0, 1.0])\n pose = pose.astype(np.float32)\n pose[:3, :3] = rot.as_matrix()\n pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]\n pose = np.linalg.inv(pose)\n rot = torch.from_numpy(pose[:3, :3]).cuda()\n trans = torch.from_numpy(pose[:3, 3]).cuda()\n rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n rays_r = dx[..., None] * 2 / np.sqrt(12)\n rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True) # W, H, 3\n rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n def near_far_from_sphere(self, rays_o, rays_d):\n if self.near > 0:\n return self.near, self.far\n a = torch.sum(rays_d ** 2, dim=-1, keepdim=True)\n b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)\n mid = 0.5 * (-b) / a\n near = mid - 1.0\n far = mid + 1.0\n return near, far\n\n def image_at(self, idx, resolution_level):\n img = cv.imread(self.images_lis[idx])\n return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)" }, { "identifier": "RenderingNetwork", "path": "models/fields.py", "snippet": "class RenderingNetwork(nn.Module):\n def __init__(self,\n d_feature,\n mode,\n d_in,\n d_out,\n d_hidden,\n n_layers,\n weight_norm=True,\n multires_view=0,\n squeeze_out=True):\n super().__init__()\n\n self.mode = mode\n self.squeeze_out = squeeze_out\n dims = [d_in + d_feature] + [d_hidden for _ in range(n_layers)] + [d_out]\n\n self.embedview_fn = None\n if multires_view > 0:\n embedview_fn, input_ch = get_embedder(multires_view)\n self.embedview_fn = embedview_fn\n dims[0] += (input_ch - 3)\n\n self.num_layers = len(dims)\n\n for l in range(0, self.num_layers - 1):\n out_dim = dims[l + 1]\n lin = nn.Linear(dims[l], out_dim)\n if weight_norm:\n lin = nn.utils.weight_norm(lin)\n\n setattr(self, \"lin\" + str(l), lin)\n\n self.relu = nn.ReLU()\n\n self.mask = -torch.ones((1, 1, 256, 256, 256)).float().cuda()\n \n\n def forward(self, points, normals, view_dirs, feature_vectors):\n if self.embedview_fn is not None:\n view_dirs = self.embedview_fn(view_dirs)\n\n rendering_input = NoOptionError\n\n if self.mode == 'idr':\n rendering_input = torch.cat([points, view_dirs, normals, feature_vectors], dim=-1)\n elif self.mode == 'no_view_dir':\n rendering_input = torch.cat([points, normals, feature_vectors], dim=-1)\n elif self.mode == 'no_normal':\n rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1)\n\n x = rendering_input\n for l in range(0, self.num_layers - 1):\n lin = getattr(self, \"lin\" + str(l))\n\n x = lin(x)\n\n if l < self.num_layers - 2:\n x = self.relu(x)\n\n if self.squeeze_out:\n x = torch.sigmoid(x)\n return x" }, { "identifier": "FieldNetwork", "path": "models/fields.py", "snippet": "class FieldNetwork(nn.Module):\n def __init__(self,\n d_in,\n d_out,\n d_hidden,\n d_t4d,\n min_emb,\n max_emb,\n n_layers,\n t_emb=-1,\n skip_in=(4,),\n bias=0.5,\n geometric_init=True,\n weight_norm=True):\n super(FieldNetwork, self).__init__()\n\n dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out]\n dims[0] = d_in + (max_emb - min_emb)*3*2\n\n self.num_layers = len(dims)\n self.skip_in = skip_in\n self.min_emb = min_emb\n self.max_emb = max_emb\n self.t_emb = t_emb\n\n if t_emb > 0:\n embed_fn, time_input_ch = 
get_embedder(t_emb, input_dims=1)\n self.embed_fn = embed_fn\n dims[0] += time_input_ch\n\n for l in range(0, self.num_layers - 1):\n if l in self.skip_in:\n in_dim = dims[l] + dims[0] + d_t4d\n else:\n in_dim = dims[l]\n out_dim = dims[l+1]\n\n lin = nn.Linear(in_dim, out_dim)\n \n if geometric_init:\n if l == self.num_layers - 2:\n torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)\n torch.nn.init.constant_(lin.bias, -bias)\n elif l == 0:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.constant_(lin.weight[:, 3:], 0.0)\n torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim))\n elif l in self.skip_in:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\n torch.nn.init.constant_(lin.weight[:, -(dims[0] + d_t4d):], 0.0)\n else:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\n\n if weight_norm:\n lin = nn.utils.weight_norm(lin)\n\n setattr(self, \"lin\" + str(l), lin)\n\n self.activation = nn.Softplus(beta=100)\n\n def set_tensor4d(self, tensor4d):\n self.tensor4d = tensor4d\n\n def forward(self, mean, cov, fid, time_emb, reg_l2=False):\n cones_embedding = integrated_pos_enc((mean[:, None, :], cov[:, None, :]), self.min_emb, self.max_emb, diagonal=True).reshape(mean.shape[0], -1)\n inputs = mean\n tri_feat = self.tensor4d(inputs, fid, torch.mean(time_emb))\n\n if reg_l2:\n d_vec = F.normalize(torch.randn_like(inputs), dim=-1) * 1e-3\n d_tri_feat = self.tensor4d(inputs + d_vec, fid, torch.mean(time_emb))\n pred_reg_l2 = (d_tri_feat - tri_feat)**2\n \n xyz = inputs\n if self.t_emb > 0:\n time_input = self.embed_fn(time_emb)\n x = torch.cat([xyz, cones_embedding, time_input], 1)\n else:\n x = torch.cat([xyz, cones_embedding], 1)\n\n for l in range(0, self.num_layers - 1):\n lin = getattr(self, \"lin\" + str(l))\n \n if l in self.skip_in:\n if self.t_emb > 0:\n x = torch.cat([x, tri_feat, xyz, cones_embedding, time_input], 1) / np.sqrt(2)\n else:\n x = torch.cat([x, tri_feat, xyz, cones_embedding], 1) / np.sqrt(2)\n x = lin(x)\n\n if l < self.num_layers - 2:\n x = self.activation(x)\n if reg_l2:\n return x, pred_reg_l2\n return x" }, { "identifier": "SingleVarianceNetwork", "path": "models/fields.py", "snippet": "class SingleVarianceNetwork(nn.Module):\n def __init__(self, init_val):\n super(SingleVarianceNetwork, self).__init__()\n init_tensor = torch.zeros(120)\n init_tensor[:] = init_val\n self.register_parameter('variance', nn.Parameter(init_tensor))\n\n def forward(self, x):\n return torch.ones([len(x), 1], device=x.device) * torch.exp(self.variance[0] * 10.0)" }, { "identifier": "Tensor4D", "path": "models/tensor4d.py", "snippet": "class Tensor4D(nn.Module):\n def __init__(self, feature_type, lr_resolution, hr_resolution, image_guide=False, image_guide_interval=2, image_guide_base=16) -> None:\n super(Tensor4D, self).__init__()\n \n self.data_dims = 0\n self.feature_type = feature_type\n if feature_type == '3d':\n self.feature_plane = SpacePlane(lr_resolution, hr_resolution)\n self.data_dims = self.feature_plane.dims\n elif feature_type == '4d':\n self.feature_plane = TimeSpacePlane(lr_resolution, hr_resolution)\n self.data_dims = self.feature_plane.dims\n\n self.img_dims = 0\n self.image_guide = image_guide\n if image_guide:\n self.conv_net = ConvNet(image_guide_base)\n self.img_dims = image_guide_base*8*2\n self.ig_interval = image_guide_interval\n\n if feature_type == '4d':\n self.compress_network 
= CompressNetwork(self.data_dims, self.data_dims // 3)\n self.compress_list = [self.compress_network.compress1, self.compress_network.compress2, self.compress_network.compress3]\n\n self.dims = self.data_dims + self.img_dims\n self.matMode = torch.BoolTensor([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).cuda()\n self.vecMode = torch.BoolTensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).cuda()\n \n def get_data_parameters(self):\n return list(self.feature_plane.parameters())\n \n def get_network_parameters(self):\n params = []\n if self.feature_type == '4d':\n params += list(self.compress_network.parameters())\n if self.image_guide:\n params += list(self.conv_net.parameters())\n return params\n\n def set_images(self, image, proj):\n step = self.ig_interval\n select_proj = torch.cat([proj[i*step:i*step+1] for i in range(proj.shape[0] // step)], dim=0)\n self.proj = select_proj\n self.img_shape = image.shape\n select_image = torch.cat([image[i*step:i*step+1] for i in range(image.shape[0] // step)], dim=0)\n self.image_feature, self.image_feature_hr = self.conv_net(F.interpolate(select_image.permute(0, 3, 1, 2), size=(1024, 1024)))\n\n def forward(self, xyz_sampled_ori, fid, time_emb):\n sigma_feature_list = [] \n\n if self.image_guide:\n proj_pts = ((self.proj[:, :3, :3] @ xyz_sampled_ori.T.unsqueeze(0)) + self.proj[:, :3, 3:]).transpose(1, 2)\n proj_xy = proj_pts[:, :, :2] / (proj_pts[:, :, 2:] + 1e-6)\n B, H, W, C = self.img_shape\n proj_xy[:, :, 0] = (proj_xy[:, :, 0] - W / 2) / (W / 2)\n proj_xy[:, :, 1] = (proj_xy[:, :, 1] - H / 2) / (H / 2)\n N = self.image_feature.shape[0]\n img_feature = grid_sample(self.image_feature, proj_xy.reshape(N, -1, 1, 2)).reshape(N, -1, xyz_sampled_ori.shape[0])\n img_feature_cost = torch.sqrt(torch.sum((img_feature - torch.sum(img_feature, dim=0).unsqueeze(0) / N)**2, dim=0) / N + 1e-8)\n img_feature_max = torch.mean(img_feature, dim=0) + torch.max(img_feature, dim=0)[0]\n image_feature_hr = grid_sample(self.image_feature_hr, proj_xy.reshape(N, -1, 1, 2)).reshape(N, -1, xyz_sampled_ori.shape[0])\n image_feature_hr_cost = torch.sqrt(torch.sum((image_feature_hr - torch.sum(image_feature_hr, dim=0).unsqueeze(0) / N)**2, dim=0) / N + 1e-8)\n image_feature_hr_max = torch.mean(image_feature_hr, dim=0) + torch.max(image_feature_hr, dim=0)[0]\n sigma_feature_list = [img_feature_cost, img_feature_max, image_feature_hr_cost, image_feature_hr_max]\n \n xyz_sampled = xyz_sampled_ori\n scale = 1.0\n matMode = self.matMode\n coordinate_plane = torch.stack((xyz_sampled[..., matMode[0]] * scale, xyz_sampled[..., matMode[1]] * scale, xyz_sampled[..., matMode[2]] * scale)).view(3, -1, 1, 2)\n\n for idx_plane in range(3):\n sample_points = coordinate_plane[[idx_plane]]\n plane_coef_point = self.feature_plane.sample(sample_points, idx_plane, time_emb).view(-1, *xyz_sampled.shape[:1])\n if self.feature_type == '4d':\n plane_coef_point = self.compress_list[idx_plane](plane_coef_point.T).T\n sigma_feature_list.append(plane_coef_point)\n \n sigma_feature_list = torch.cat(sigma_feature_list, dim=0)\n # print(sigma_feature_list.shape)\n return sigma_feature_list.T" }, { "identifier": "NeuSRenderer", "path": "models/renderer.py", "snippet": "class NeuSRenderer:\n def __init__(self,\n sdf_network,\n deviation_network,\n color_network,\n mask3d,\n n_samples,\n n_importance,\n n_outside,\n up_sample_steps,\n perturb,\n reg_l2=False,\n mip_render=False,\n flow_network=None):\n \n self.sdf_network = sdf_network\n self.deviation_network = deviation_network\n self.color_network = color_network\n self.mask3d = 
mask3d\n self.n_samples = n_samples\n self.n_importance = n_importance\n self.n_outside = n_outside\n self.up_sample_steps = up_sample_steps\n self.perturb = perturb\n self.reg_l2 = reg_l2\n self.flow_network = flow_network\n self.mip_render = mip_render\n\n def mask_query_geometry(self, mean, cov, only_sdf=False):\n fid = self.fid\n time_emb = self.time_emb\n time_input = time_emb.expand(mean[:, :1].shape)\n space_time_input = torch.cat([mean, time_input], dim=-1)\n if not only_sdf:\n space_time_input.requires_grad_(True)\n inputs = space_time_input[:, :3]\n time_emb = space_time_input[:, 3:]\n N, _ = inputs.shape\n grads = torch.zeros((N, 4), device=inputs.device)\n sdf_nn = torch.zeros((N, 257), device=inputs.device)\n\n reg_l2 = torch.zeros((N, self.sdf_network.tensor4d.dims), device=inputs.device)\n grads[:, 0] = 1\n sdf_nn[:, 0] = -10\n\n mask = self.mask3d.valid_input(inputs, fid)\n if torch.sum(mask) == 0:\n results = {\n 'sdf_nn': sdf_nn,\n 'grads': grads[:, :3],\n 'time_grads': grads[:, 3:],\n 'pts_mask': mask,\n 'reg_l2': reg_l2\n }\n return results\n mask_mean = inputs[mask, :]\n mask_time_emb = time_emb[mask, :]\n mask_cov = cov[mask, :]\n \n if self.flow_network is not None:\n mask_cov = torch.zeros_like(mask_mean) # flow mode, disable mip_render\n if fid != 0:\n pred_flow = self.flow_network(mask_mean, mask_cov, fid, mask_time_emb, reg_l2=False)\n mask_mean = mask_mean + pred_flow\n elif not self.mip_render:\n mask_cov = torch.zeros_like(mask_mean)\n\n if (not only_sdf) and self.reg_l2:\n pred_sdf_nn, pred_reg_l2 = self.sdf_network(mask_mean, mask_cov, fid, mask_time_emb, reg_l2=True)\n reg_l2[mask] = pred_reg_l2\n else:\n pred_sdf_nn = self.sdf_network(mask_mean, mask_cov, fid, mask_time_emb, reg_l2=False)\n\n if not only_sdf:\n pred_sdf = pred_sdf_nn[:, :1]\n d_output = torch.ones_like(pred_sdf, requires_grad=False, device=pred_sdf.device)\n gradients = torch.autograd.grad(\n outputs=pred_sdf,\n inputs=space_time_input,\n grad_outputs=d_output,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n grads[mask] = gradients.reshape(-1, 4)[mask]\n \n sdf_nn[mask] = pred_sdf_nn\n results = {\n 'sdf_nn': sdf_nn,\n 'grads': grads[:, :3],\n 'time_grads': grads[:, 3:],\n 'pts_mask': mask,\n 'reg_l2': reg_l2\n }\n return results\n\n def mask_query_color(self, pts, mask, normals, view_dirs, features):\n N, _ = pts.shape\n out = torch.zeros((N, 3), device=pts.device)\n if torch.sum(mask) > 0:\n x = self.color_network(pts[mask], normals[mask], view_dirs[mask], features[mask])\n out[mask] = x\n return out\n else:\n return torch.zeros((N, 3), device=pts.device)\n\n def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s, pts_mask):\n \"\"\"\n Up sampling give a fixed inv_s\n \"\"\"\n batch_size, n_samples = z_vals.shape\n pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] # n_rays, n_samples, 3\n radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False)\n inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0)\n sdf = sdf.reshape(batch_size, n_samples)\n prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:]\n prev_mask, next_mask = pts_mask[:, :-1], pts_mask[:, 1:]\n mid_mask = torch.logical_and(prev_mask, next_mask)\n prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:]\n mid_sdf = (prev_sdf + next_sdf) * 0.5\n cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)\n\n # ----------------------------------------------------------------------------------------------------------\n # Use min value of [ cos, prev_cos ]\n # 
Though it makes the sampling (not rendering) a little bit biased, this strategy can make the sampling more\n # robust when meeting situations like below:\n #\n # SDF\n # ^\n # |\\ -----x----...\n # | \\ /\n # | x x\n # |---\\----/-------------> 0 level\n # | \\ /\n # | \\/\n # |\n # ----------------------------------------------------------------------------------------------------------\n prev_cos_val = torch.cat([torch.zeros([batch_size, 1], device=sdf.device), cos_val[:, :-1]], dim=-1)\n cos_val = torch.stack([prev_cos_val, cos_val], dim=-1)\n cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False)\n cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere\n\n dist = (next_z_vals - prev_z_vals)\n prev_esti_sdf = mid_sdf - cos_val * dist * 0.5\n next_esti_sdf = mid_sdf + cos_val * dist * 0.5\n prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s)\n next_cdf = torch.sigmoid(next_esti_sdf * inv_s)\n\n alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)\n alpha[~mid_mask] = 0\n alpha = alpha.clamp(0.0, 1.0)\n \n alpha = torch.cat([alpha, torch.zeros([batch_size, 1], device=alpha.device)], dim=-1)\n weights = alpha * torch.cumprod(\n torch.cat([torch.ones([batch_size, 1], device=alpha.device), 1. - alpha + 1e-7], -1), -1)[:, :-1]\n\n z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach()\n return z_samples\n\n def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, pts_mask, last=False):\n batch_size, n_samples = z_vals.shape\n _, n_importance = new_z_vals.shape\n pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None]\n z_vals = torch.cat([z_vals, new_z_vals], dim=-1)\n z_vals, index = torch.sort(z_vals, dim=-1)\n if not last:\n new_sdf, new_pts_mask = self.sdf_network.sdf(pts.reshape(-1, 3), rt_mask=True)\n new_sdf = new_sdf.reshape(batch_size, n_importance)\n new_pts_mask = new_pts_mask.reshape(batch_size, n_importance)\n sdf = torch.cat([sdf, new_sdf], dim=-1)\n pts_mask = torch.cat([pts_mask, new_pts_mask], dim=-1)\n xx = torch.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1)\n index = index.reshape(-1)\n sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance)\n pts_mask = pts_mask[(xx, index)].reshape(batch_size, n_samples + n_importance)\n\n return z_vals, sdf, pts_mask\n\n def render_core(self,\n rays_o,\n rays_d,\n rays_r,\n z_vals,\n sample_dist,\n background_alpha=None,\n background_sampled_color=None,\n background_rgb=None,\n cos_anneal_ratio=0.0):\n batch_size, n_samples = z_vals[:, :-1].shape\n\n # Section length\n dists = z_vals[..., 1:] - z_vals[..., :-1]\n cat_dists = torch.cat([dists, torch.Tensor([sample_dist]).to(dists.device).expand(dists[..., :1].shape)], -1)\n mid_z_vals = z_vals + cat_dists * 0.5\n\n cones = cast_rays(z_vals, rays_o, rays_d, rays_r, 'cone', diagonal=True)\n dirs = rays_d[:, None, :].expand(cones[0].shape)\n dirs = dirs.reshape(-1, 3)\n\n results = self.mask_query_geometry(cones[0].reshape(-1, 3), cones[1].reshape(-1, 3))\n sdf_nn_output, gradients, t_grads, pts_mask = results['sdf_nn'], results['grads'], results['time_grads'], results['pts_mask']\n sdf = sdf_nn_output[:, :1]\n feature_vector = sdf_nn_output[:, 1:]\n\n gradients = gradients.squeeze()\n sampled_color = self.mask_query_color(cones[0].reshape(-1, 3), pts_mask, gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3)\n \n inv_s = self.deviation_network(torch.zeros([1, 3], device=sdf.device))[:, :1].clip(1e-6, 1e6) # Single parameter\n inv_s = inv_s.expand(batch_size * n_samples, 1)\n\n true_cos = 
(dirs * gradients).sum(-1, keepdim=True)\n\n # \"cos_anneal_ratio\" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes\n # the cos value \"not dead\" at the beginning training iterations, for better convergence.\n iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +\n F.relu(-true_cos) * cos_anneal_ratio) # always non-positive\n\n # Estimate signed distances at section points\n estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5\n estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5\n\n prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)\n next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)\n\n p = prev_cdf - next_cdf\n c = prev_cdf\n\n alpha = ((p + 1e-5) / (c + 1e-5))\n \n alpha[~pts_mask] = 0\n alpha = alpha.reshape(batch_size, n_samples).clip(0.0, 1.0)\n \n weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1], device=alpha.device), 1. - alpha + 1e-7], -1), -1)[:, :-1]\n weights_sum = weights.sum(dim=-1, keepdim=True)\n \n color = (sampled_color * weights[:, :, None]).sum(dim=1)\n if background_rgb is not None: # Fixed background, usually black\n color = color + background_rgb * (1.0 - weights_sum)\n\n # Eikonal loss\n gradient_error = torch.mean((torch.linalg.norm(gradients.reshape(batch_size, n_samples, 3), ord=2,\n dim=-1) - 1.0) ** 2)\n time_grad_error = torch.mean(t_grads**2)\n return {\n 'color': color,\n 'sdf': sdf,\n 'pts_mask': pts_mask,\n 'dists': dists,\n 'gradients': gradients.reshape(batch_size, n_samples, 3),\n 's_val': 1.0 / inv_s,\n 'mid_z_vals': mid_z_vals,\n 'weights': weights,\n 'gradient_error': gradient_error,\n 'time_grad_error': time_grad_error,\n 'reg_l2': results['reg_l2'].reshape(batch_size, n_samples, -1),\n }\n\n def render(self, rays_o, rays_d, rays_r, near, far, fid, time_emb, perturb_overwrite=-1, background_rgb=None, cos_anneal_ratio=0.0):\n self.fid = fid\n self.time_emb = time_emb\n self.mask3d.set_fid(fid)\n\n batch_size = len(rays_o)\n sample_dist = 2.0 / self.n_samples # Assuming the region of interest is a unit sphere\n z_vals = torch.linspace(0.0, 1.0, self.n_samples, device=rays_o.device)\n z_vals = near + (far - near) * z_vals[None, :]\n\n z_vals_outside = None\n \n n_samples = self.n_samples\n perturb = self.perturb\n\n if perturb_overwrite >= 0:\n perturb = perturb_overwrite\n if perturb > 0:\n t_rand = (torch.rand([batch_size, 1], device=z_vals.device) - 0.5)\n z_vals = z_vals + t_rand * 2.0 / self.n_samples\n\n background_alpha = None\n background_sampled_color = None\n\n # Up sample\n if self.n_importance > 0:\n with torch.no_grad():\n cast_z_vals = torch.cat([z_vals, z_vals[:, -1:]], dim=1)\n cones = cast_rays(cast_z_vals, rays_o, rays_d, rays_r, 'cone', diagonal=True)\n results = self.mask_query_geometry(cones[0].reshape(-1, 3), cones[1].reshape(-1, 3), only_sdf=True)\n sdf, pts_mask = results['sdf_nn'][:, :1], results['pts_mask']\n # sdf, pts_mask = self.sdf_network.sdf(pts.reshape(-1, 3), rt_mask=True)\n sdf = sdf.reshape(batch_size, self.n_samples)\n pts_mask = pts_mask.reshape(batch_size, self.n_samples)\n for i in range(self.up_sample_steps):\n new_z_vals = self.up_sample(rays_o,\n rays_d,\n z_vals,\n sdf,\n self.n_importance // self.up_sample_steps + 1,\n 64 * 2**i, pts_mask)\n z_vals, sdf, pts_mask = self.cat_z_vals(rays_o,\n rays_d,\n z_vals,\n new_z_vals,\n sdf, pts_mask,\n last=(i + 1 == self.up_sample_steps))\n\n n_samples = self.n_samples + self.n_importance\n\n background_alpha = None\n background_sampled_color = None\n 
sample_dist = 1e-2\n\n # Render core\n ret_fine = self.render_core(rays_o,\n rays_d,\n rays_r,\n z_vals,\n sample_dist,\n background_rgb=background_rgb,\n background_alpha=background_alpha,\n background_sampled_color=background_sampled_color,\n cos_anneal_ratio=cos_anneal_ratio)\n\n\n return {\n 'color_fine': ret_fine['color'],\n 's_val': ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdim=True),\n 'mid_z_vals': ret_fine['mid_z_vals'],\n 'weights': ret_fine['weights'],\n 'weight_sum': ret_fine['weights'].sum(dim=-1, keepdim=True),\n 'weight_max': torch.max(ret_fine['weights'], dim=-1, keepdim=True)[0],\n 'gradients': ret_fine['gradients'],\n 'gradient_error': ret_fine['gradient_error'],\n 'time_grad_error': ret_fine['time_grad_error'],\n 'reg_l2': ret_fine['reg_l2']\n }" }, { "identifier": "Mask3D", "path": "models/mask.py", "snippet": "class Mask3D:\n def __init__(self, mask_type, num_frames=None, mask_reso=None, device=None):\n self.mask_type = mask_type # 'bounding or visualhull'\n if mask_type == 'visualhull':\n self.R = mask_reso\n self.mask = torch.ones([num_frames, self.R, self.R, self.R]).float()\n self.device = device\n self.current_fid = -1\n self.current_mask = None\n\n def set_fid(self, fid):\n if fid != self.current_fid:\n self.current_fid = fid\n if self.mask_type == 'visualhull':\n self.current_mask = self.mask[fid.cpu()].to(self.device)\n \n def valid_input(self, pts, fid):\n with torch.no_grad():\n pts = pts.reshape(1, -1, 1, 1, 3)\n pts_max = torch.max(pts, dim=-1)[0]\n pts_min = torch.min(pts, dim=-1)[0]\n mask_max = (pts_max > 1).reshape(-1)\n mask_min = (pts_min < -1).reshape(-1)\n if self.mask_type == 'visualhull':\n R = self.R\n sigma = F.grid_sample(self.current_mask.view(1, 1, R, R, R), pts, mode='bilinear', padding_mode='border').reshape(-1)\n calc_mask = sigma < 0.05\n else:\n calc_mask = torch.ones_like(mask_max)\n calc_mask[mask_max] = 0\n calc_mask[mask_min] = 0\n return calc_mask\n\n def visualhull(self, pts_ori, projs, masks, g_nums):\n cam_nums = projs.shape[0]\n interval = 1\n pts_mask = torch.zeros(pts_ori.shape[0], g_nums)\n out_mask = torch.zeros(pts_ori.shape[0])\n N, H, W, C = masks.shape\n for gp in range(cam_nums // (g_nums*interval)):\n for j in range(g_nums):\n i = j + gp*(g_nums*interval)\n mask = masks[i, :, :, :1].permute(2, 0, 1).unsqueeze(0).clone()\n mask = torch.max_pool2d(mask, 7, 1, 3, 1)\n pts = torch.cat([pts_ori, torch.ones_like(pts_ori[:, :1])], dim=-1)\n pts = projs[i] @ pts.T\n pts = pts[:2] / pts[2:]\n pts[0] = pts[0] / W * 2 - 1\n pts[1] = pts[1] / H * 2 - 1\n pts = pts.T.reshape(1, -1, 1, 2)\n \n sample_mask = torch.nn.functional.grid_sample(mask, pts, mode='bilinear', padding_mode='zeros').reshape(-1)\n pts_mask[:, j] = sample_mask\n pts_mask_sum = torch.min(pts_mask, dim=1)[0]\n valid = pts_mask_sum > 0.1\n out_mask[valid] = -1\n if gp == 0:\n out_mask[~valid] = 1\n return out_mask\n\n def compute_image_mask(self, projs, masks, g_nums):\n N = 64\n R = self.R\n X = torch.linspace(-1, 1, R).split(N)\n Y = torch.linspace(-1, 1, R).split(N)\n Z = torch.linspace(-1, 1, R).split(N)\n cam_nums = projs.shape[0]\n \n self.mask = self.mask.to(self.device)\n for gp in tqdm(range(cam_nums // g_nums)):\n # for gp in range(1):\n with torch.no_grad():\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = torch.meshgrid(xs, ys, zs)\n pts = torch.cat([zz.reshape(-1, 1), yy.reshape(-1, 1), xx.reshape(-1, 1)], dim=-1).to(self.device)\n val = self.visualhull(pts, 
projs[gp*g_nums:gp*g_nums+g_nums].to(self.device), masks[gp*g_nums:gp*g_nums+g_nums].to(self.device), g_nums).reshape(len(xs), len(ys), len(zs))\n self.mask[gp, xi * N: xi * N + len(xs),yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val\n self.mask = self.mask.unsqueeze(1)\n self.mask = -torch.max_pool3d(-self.mask, 7, 1, 3)\n self.mask[self.mask > -0.5] = 1\n self.mask = self.mask.detach().cpu()\n \n def compute_mask(self, fid, query_func, inv_s):\n N = 64\n R = 128\n X = torch.linspace(-1, 1, R).split(N)\n Y = torch.linspace(-1, 1, R).split(N)\n Z = torch.linspace(-1, 1, R).split(N)\n from .renderer import sigma_f\n mask = self.mask[fid].reshape(R, R, R).clone()\n self.triplane[0].flow(fid)\n with torch.no_grad():\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = torch.meshgrid(xs, ys, zs)\n pts = torch.cat([zz.reshape(-1, 1), yy.reshape(-1, 1), xx.reshape(-1, 1)], dim=-1)\n val = sigma_f(query_func(pts), inv_s).reshape(len(xs), len(ys), len(zs))\n mask[xi * N: xi * N + len(xs),yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val\n valid = mask > 0.02\n mask[valid] = 1\n mask[~valid] = -1\n mask = -torch.max_pool3d(mask.reshape(1, 1, 128, 128, 128), 7, 1, 3)\n self.mask[fid][mask[0] > -0.5] = 1" } ]
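The `up_sample` routine in the renderer snippet above draws extra depths with a `sample_pdf` helper that is imported elsewhere in the repository and not shown in this record. Purely for orientation, a minimal NeRF-style inverse-CDF sampler consistent with that call (`sample_pdf(z_vals, weights, n_importance, det=True)`) might look like the sketch below; the shape conventions and internals are assumptions, not the project's actual implementation.

import torch

def sample_pdf(bins, weights, n_importance, det=False):
    """Hypothetical sketch: draw n_importance depths from the piecewise-constant
    PDF defined by `weights` over the intervals of `bins`.
    Assumed shapes: bins (B, N) sorted along the ray, weights (B, N - 1)."""
    batch, n_bins = bins.shape
    pdf = weights + 1e-5                                   # avoid an all-zero PDF
    pdf = pdf / pdf.sum(dim=-1, keepdim=True)
    cdf = torch.cumsum(pdf, dim=-1)
    cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], dim=-1)   # (B, N)

    if det:   # evenly spaced quantiles, matching the det=True call in up_sample
        u = torch.linspace(0.0, 1.0, n_importance, device=bins.device)
        u = u.expand(batch, n_importance).contiguous()
    else:
        u = torch.rand(batch, n_importance, device=bins.device)

    # Invert the CDF: locate the bracketing bin for every quantile and interpolate.
    inds = torch.searchsorted(cdf, u, right=True)
    below = (inds - 1).clamp(0, n_bins - 1)
    above = inds.clamp(0, n_bins - 1)
    cdf_lo, cdf_hi = torch.gather(cdf, -1, below), torch.gather(cdf, -1, above)
    bin_lo, bin_hi = torch.gather(bins, -1, below), torch.gather(bins, -1, above)
    t = (u - cdf_lo) / (cdf_hi - cdf_lo).clamp(min=1e-5)
    return bin_lo + t * (bin_hi - bin_lo)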
import os
import time
import logging
import argparse
import numpy as np
import cv2 as cv
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from shutil import copyfile
from tqdm import tqdm
from pyhocon import ConfigFactory
from models.dataset import Dataset, BlenderDataset
from models.fields import RenderingNetwork, FieldNetwork, SingleVarianceNetwork
from models.tensor4d import Tensor4D
from models.renderer import NeuSRenderer
from models.mask import Mask3D
from metrics import *
15,678
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'


class Runner:
    def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False):
        self.device = torch.device('cuda')

        # Configuration
        self.conf_path = conf_path
        f = open(self.conf_path)
        conf_text = f.read()
        conf_text = conf_text.replace('CASE_NAME', case)
        f.close()

        self.conf = ConfigFactory.parse_string(conf_text)
        self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case)
        self.base_exp_dir = self.conf['general.base_exp_dir']
        os.makedirs(self.base_exp_dir, exist_ok=True)
        self.is_blender = self.conf['dataset'].get_bool('is_blender', default=False)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'


class Runner:
    def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False):
        self.device = torch.device('cuda')

        # Configuration
        self.conf_path = conf_path
        f = open(self.conf_path)
        conf_text = f.read()
        conf_text = conf_text.replace('CASE_NAME', case)
        f.close()

        self.conf = ConfigFactory.parse_string(conf_text)
        self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case)
        self.base_exp_dir = self.conf['general.base_exp_dir']
        os.makedirs(self.base_exp_dir, exist_ok=True)
        self.is_blender = self.conf['dataset'].get_bool('is_blender', default=False)
self.dataset = BlenderDataset(self.conf['dataset']) if self.is_blender else Dataset(self.conf['dataset'])
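For readers unfamiliar with `pyhocon`, the `Runner` code in this record parses a HOCON config string and substitutes `CASE_NAME` before reading keys. A self-contained illustration of that pattern follows; the config keys and the case name `lego` below are placeholders, not the repository's actual config file.

from pyhocon import ConfigFactory

# Placeholder config text; the real project reads this from a .conf file on disk.
conf_text = """
general { base_exp_dir = "./exp/CASE_NAME" }
dataset { data_dir = "./data/CASE_NAME", is_blender = false }
"""
conf = ConfigFactory.parse_string(conf_text.replace('CASE_NAME', 'lego'))

print(conf['general.base_exp_dir'])                            # ./exp/lego
print(conf['dataset.data_dir'])                                # ./data/lego
print(conf['dataset'].get_bool('is_blender', default=False))   # False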
1
2023-11-07 10:16:33+00:00
24k
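One orientation note on the `render_core` snippet in this record's context: the step `alpha = (p + 1e-5) / (c + 1e-5)` with `p = prev_cdf - next_cdf` and `c = prev_cdf` is the discrete NeuS-style opacity obtained from a sigmoid CDF applied to estimated per-section SDF values. Up to the small epsilons and the final clipping to [0, 1], it corresponds to the standard construction below (stated here for reference, not quoted from the repository):

$$\alpha_i = \max\!\left(\frac{\Phi_s\big(f(\mathbf{p}_i)\big) - \Phi_s\big(f(\mathbf{p}_{i+1})\big)}{\Phi_s\big(f(\mathbf{p}_i)\big)},\; 0\right), \qquad \Phi_s(x) = \frac{1}{1 + e^{-s x}}$$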
Kushalhk/AutoFilter
plugins/p_ttishow.py
[ { "identifier": "ADMINS", "path": "info.py", "snippet": "ADMINS = [int(admin) if id_pattern.search(admin) else admin for admin in environ.get('ADMINS', '').split()]" }, { "identifier": "LOG_CHANNEL", "path": "info.py", "snippet": "LOG_CHANNEL = int(environ.get('LOG_CHANNEL', ''))" }, { "identifier": "SUPPORT_CHAT", "path": "info.py", "snippet": "SUPPORT_CHAT = environ.get('SUPPORT_CHAT', '')" }, { "identifier": "MELCOW_NEW_USERS", "path": "info.py", "snippet": "MELCOW_NEW_USERS = is_enabled((environ.get('MELCOW_NEW_USERS', \"True\")), True)" }, { "identifier": "MELCOW_VID", "path": "info.py", "snippet": "MELCOW_VID = environ.get(\"MELCOW_VID\", \"https://te.legra.ph/file/6f55d902f9bf2d0afd4bb.mp4\")" }, { "identifier": "CHNL_LNK", "path": "info.py", "snippet": "CHNL_LNK = environ.get('CHNL_LNK', 'https://t.me/TG_LINKS_CHANNEL')" }, { "identifier": "GRP_LNK", "path": "info.py", "snippet": "GRP_LNK = environ.get('GRP_LNK', 'https://t.me/TG_SUPPORT_GROUP')" }, { "identifier": "db", "path": "database/users_chats_db.py", "snippet": "class Database:\n def __init__(self, uri, database_name):\n def new_user(self, id, name):\n def new_group(self, id, title):\n async def add_user(self, id, name):\n async def is_user_exist(self, id):\n async def total_users_count(self):\n async def remove_ban(self, id):\n async def ban_user(self, user_id, ban_reason=\"No Reason\"):\n async def get_ban_status(self, id):\n async def get_all_users(self):\n async def delete_user(self, user_id):\n async def get_banned(self):\n async def add_chat(self, chat, title):\n async def get_chat(self, chat):\n async def re_enable_chat(self, id):\n async def update_settings(self, id, settings):\n async def get_settings(self, id):\n async def disable_chat(self, chat, reason=\"No Reason\"):\n async def total_chat_count(self):\n async def get_all_chats(self):\n async def get_db_size(self):" }, { "identifier": "Media", "path": "database/ia_filterdb.py", "snippet": "class Media(Document):\n file_id = fields.StrField(attribute='_id')\n file_ref = fields.StrField(allow_none=True)\n file_name = fields.StrField(required=True)\n file_size = fields.IntField(required=True)\n file_type = fields.StrField(allow_none=True)\n mime_type = fields.StrField(allow_none=True)\n caption = fields.StrField(allow_none=True)\n\n class Meta:\n indexes = ('$file_name', )\n collection_name = COLLECTION_NAME" }, { "identifier": "get_size", "path": "utils.py", "snippet": "def get_size(size):\n \"\"\"Get size in readable format\"\"\"\n\n units = [\"Bytes\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"]\n size = float(size)\n i = 0\n while size >= 1024.0 and i < len(units):\n i += 1\n size /= 1024.0\n return \"%.2f %s\" % (size, units[i])" }, { "identifier": "temp", "path": "utils.py", "snippet": "class temp(object):\n BANNED_USERS = []\n BANNED_CHATS = []\n ME = None\n CURRENT=int(os.environ.get(\"SKIP\", 2))\n CANCEL = False\n MELCOW = {}\n U_NAME = None\n B_NAME = None\n GETALL = {}\n SHORT = {}\n SETTINGS = {}" }, { "identifier": "get_settings", "path": "utils.py", "snippet": "async def get_settings(group_id):\n settings = temp.SETTINGS.get(group_id)\n if not settings:\n settings = await db.get_settings(group_id)\n temp.SETTINGS[group_id] = settings\n return settings" }, { "identifier": "script", "path": "Script.py", "snippet": "class script(object):\r\n START_TXT = \"\"\"<b>Hᴇʟʟᴏ 👋 {}</b>\r\n\r\n<b>Mʏ Nᴀᴍᴇ Is <a href=\"https://t.me/{}\">{}</a>, I Cᴀɴ Pʀᴏᴠɪᴅᴇ Mᴏᴠɪᴇs, Sᴇʀɪᴇs, Aɴɪᴍᴀᴛɪᴏɴ, Cᴀʀᴛᴏᴏɴ, Aɴɪᴍᴇ, K-Dʀᴀᴍᴀ & Mᴀɴʏ Mᴏʀᴇ ☺ Jᴜsᴛ Aᴅᴅ Mᴇ Tᴏ Yᴏᴜʀ Gʀᴏᴜᴘ As Aᴅᴍɪɴ 
EɴJᴏʏ 😍</b>\"\"\"\r\n\r\n HELP_TXT = \"\"\"<b>Hᴇʀᴇ Is Tʜᴇ Hᴇʟᴘ Fᴏʀ Mʏ Cᴏᴍᴍᴀɴᴅs.</b>\"\"\"\r\n \r\n ABOUT_TXT = \"\"\"\r\n<b>‣ ᴍʏ ɴᴀᴍᴇ : <a href=\"https://t.me/{}\">ʙᴏᴛ</a>\r\n‣ ᴄʀᴇᴀᴛᴏʀ : <a href=\"https://t.me/KUSHALHK\">𝐊𝐔𝐒𝐇𝐀𝐋</a>\r\n‣ ʟɪʙʀᴀʀʏ : <a href=\"https://pyrogram.org/\">ᴘʏʀᴏɢʀᴀᴍ</a>\r\n‣ ʟᴀɴɢᴜᴀɢᴇ : <a href=\"https://www.python.org/\">ᴘʏᴛʜᴏɴ</a>\r\n‣ ᴅᴀᴛᴀʙᴀꜱᴇ : <a href=\"https://www.mongodb.com/\">ᴍᴏɴɢᴏ ᴅʙ</a>\r\n‣ ʜᴏꜱᴛᴇᴅ ᴏɴ : <a href=\"https://render.com/\">Render</a>\r\n‣ ʙᴜɪʟᴅ ꜱᴛᴀᴛᴜꜱ : ᴠ.𝟹.𝟶 [ꜱᴛᴀʙʟᴇ]</b>\"\"\"\r\n \r\n DISCLAIMER_TXT = \"\"\"<b>ᴛʜɪꜱ ɪꜱ ᴀɴ ᴏᴘᴇɴ ꜱᴏᴜʀᴄᴇ ᴘʀᴏᴊᴇᴄᴛ.\r\n\r\nᴀʟʟ ᴛʜᴇ ꜰɪʟᴇꜱ ɪɴ ᴛʜɪꜱ ʙᴏᴛ ᴀʀᴇ ꜰʀᴇᴇʟʏ ᴀᴠᴀɪʟᴀʙʟᴇ ᴏɴ ᴛʜᴇ ɪɴᴛᴇʀɴᴇᴛ ᴏʀ ᴘᴏꜱᴛᴇᴅ ʙʏ ꜱᴏᴍᴇʙᴏᴅʏ ᴇʟꜱᴇ. ᴊᴜꜱᴛ ꜰᴏʀ ᴇᴀꜱʏ ꜱᴇᴀʀᴄʜɪɴɢ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ɪɴᴅᴇxɪɴɢ ꜰɪʟᴇꜱ ᴡʜɪᴄʜ ᴀʀᴇ ᴀʟʀᴇᴀᴅʏ ᴜᴘʟᴏᴀᴅᴇᴅ ᴏɴ ᴛᴇʟᴇɢʀᴀᴍ. ᴡᴇ ʀᴇꜱᴘᴇᴄᴛ ᴀʟʟ ᴛʜᴇ ᴄᴏᴘʏʀɪɢʜᴛ ʟᴀᴡꜱ ᴀɴᴅ ᴡᴏʀᴋꜱ ɪɴ ᴄᴏᴍᴘʟɪᴀɴᴄᴇ ᴡɪᴛʜ ᴅᴍᴄᴀ ᴀɴᴅ ᴇᴜᴄᴅ. ɪꜰ ᴀɴʏᴛʜɪɴɢ ɪꜱ ᴀɢᴀɪɴꜱᴛ ʟᴀᴡ ᴘʟᴇᴀꜱᴇ ᴄᴏɴᴛᴀᴄᴛ ᴍᴇ ꜱᴏ ᴛʜᴀᴛ ɪᴛ ᴄᴀɴ ʙᴇ ʀᴇᴍᴏᴠᴇᴅ ᴀꜱᴀᴘ. ɪᴛ ɪꜱ ꜰᴏʀʙɪᴅᴅᴇɴ ᴛᴏ ᴅᴏᴡɴʟᴏᴀᴅ, ꜱᴛʀᴇᴀᴍ, ʀᴇᴘʀᴏᴅᴜᴄᴇ, ꜱʜᴀʀᴇ ᴏʀ ᴄᴏɴꜱᴜᴍᴇ ᴄᴏɴᴛᴇɴᴛ ᴡɪᴛʜᴏᴜᴛ ᴇxᴘʟɪᴄɪᴛ ᴘᴇʀᴍɪꜱꜱɪᴏɴ ꜰʀᴏᴍ ᴛʜᴇ ᴄᴏɴᴛᴇɴᴛ ᴡɪᴛʜᴏᴜᴛ ᴇxᴘʟɪᴄɪᴛ ᴘᴇʀᴍɪꜱꜱɪᴏɴ ꜰʀᴏᴍ ᴛʜᴇ ᴄᴏɴᴛᴇɴᴛ ᴄʀᴇᴀᴛᴏʀ ᴏʀ ʟᴇɢᴀʟ ᴄᴏᴘʏʀɪɢʜᴛ ʜᴏʟᴅᴇʀ. ɪꜰ ʏᴏᴜ ʙᴇʟɪᴇᴠᴇ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ᴠɪᴏʟᴀᴛɪɴɢ ʏᴏᴜʀ ɪɴᴛᴇʟʟᴇᴄᴛᴜᴀʟ ᴘʀᴏᴘᴇʀᴛʏ, ᴄᴏɴᴛᴀᴄᴛ ᴛʜᴇ ʀᴇꜱᴘᴇᴄᴛɪᴠᴇ ᴄʜᴀɴɴᴇʟꜱ ꜰᴏʀ ʀᴇᴍᴏᴠᴀʟ. ᴛʜᴇ ʙᴏᴛ ᴅᴏᴇꜱ ɴᴏᴛ ᴏᴡɴ ᴀɴʏ ᴏꜰ ᴛʜᴇꜱᴇ ᴄᴏɴᴛᴇɴᴛꜱ, ɪᴛ ᴏɴʟʏ ɪɴᴅᴇx ᴛʜᴇ ꜰɪʟᴇꜱ ꜰʀᴏᴍ ᴛᴇʟᴇɢʀᴀᴍ.\r\n\r\nᴍᴀɪɴᴛᴀɪɴᴇᴅ ʙʏ : <a href=\"https://t.me/KUSHALHK\">𝐊𝐔𝐒𝐇𝐀𝐋</a></b>\"\"\"\r\n\r\n SOURCE_TXT = \"\"\"\r\n<b>Hᴇʏ, Tʜɪs ɪs ᴀ Oᴘᴇɴ Sᴏᴜʀᴄᴇ Pʀᴏᴊᴇᴄᴛ.\r\n\r\nTʜɪs Bᴏᴛ ʜᴀs Lᴀᴛᴇsᴛ ᴀɴᴅ Aᴅᴠᴀɴᴄᴇᴅ Fᴇᴀᴛᴜʀᴇs⚡️\r\n\r\nFork our repository and give star ⭐- <a href='https://github.com/Kushalhk/AutoFilter'>📥 ᴄʟɪᴄᴋ ʜᴇʀᴇ 📥</a></b>\r\n\"\"\"\r\n \r\n KUSHAL_TXT = \"\"\" \r\n<b>🔥 ᴘʀᴇᴍɪᴜᴍ ғᴇᴀᴛᴜʀᴇs 🔥\r\n\r\n➻ ɴᴏ ɴᴇᴇᴅ ᴛᴏ ᴠᴇʀɪғʏ\r\n➻ ᴅɪʀᴇᴄᴛ ғɪʟᴇs\r\n➻ ᴀᴅ-ғʀᴇᴇ ᴇxᴘᴇʀɪᴇɴᴄᴇ\r\n➻ ʜɪɢʜ-sᴘᴇᴇᴅ ᴅᴏᴡɴʟᴏᴀᴅ ʟɪɴᴋ\r\n➻ ᴜɴʟɪᴍɪᴛᴇᴅ ᴍᴏᴠɪᴇs ᴀɴᴅ sᴇʀɪᴇs\r\n➻ ғᴜʟʟ ᴀᴅᴍɪɴ sᴜᴘᴘᴏʀᴛ \r\n➻ ʀᴇǫᴜᴇsᴛ ᴡɪʟʟ ʙᴇ ᴄᴏᴍᴘʟᴇᴛᴇᴅ ɪɴ 𝟷ʜ ɪғ ᴀᴠᴀɪʟᴀʙʟᴇ\r\n\r\n‼️ ᴄʟɪᴄᴋ ᴏɴ ʙᴇʟᴏᴡ ʙᴜᴛᴛᴏɴ ᴛᴏ ᴄʜᴇᴄᴋ ᴀʟʟ ᴀᴠᴀɪʟᴀʙʟᴇ ᴘʀᴇᴍɪᴜᴍ ᴘʟᴀɴs ᴀɴᴅ ɪᴛ's ᴘʀɪᴄᴇs.</b>\"\"\"\r\n\r\n \r\n SETTINGS_TXT = \"\"\"\r\nHᴇʟᴘ : <b>Sᴇᴛᴛɪɴɢꜱ</b>\r\n \r\n◈ sᴇᴛᴛɪɴɢs ɪs ᴍᴏsᴛ ɪᴍᴘᴏʀᴛᴀɴᴛ ғᴇᴀᴛᴜʀᴇ ɪɴ ᴛʜɪs ʙᴏᴛ.\r\n◈ ʏᴏᴜ ᴄᴀɴ ᴇᴀsɪʟʏ ᴄᴜsᴛᴏᴍɪᴢᴇ ᴛʜɪs ʙᴏᴛ ғᴏʀ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\r\n\r\n<b>Nᴏᴛᴇ :</b>\r\n1. ᴏɴʟʏ ɢʀᴏᴜᴘ ᴀᴅᴍɪɴ ᴄᴀɴ ᴜsᴇ ᴛʜɪs ᴄᴏᴍᴍᴀɴᴅ ᴀɴᴅ ᴄʜᴀɴɢᴇ sᴇᴛᴛɪɴɢs.\r\n2. ɪᴛ ᴡᴏʀᴋs ᴏɴʟʏ ᴡʜᴇɴ ʙᴏᴛ ᴀʟʀᴇᴀᴅʏ ᴄᴏɴɴᴇᴄᴛᴇᴅ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /connect - ᴄᴏɴɴᴇᴄᴛ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴛᴏ ʙᴏᴛ\r\n• /settings - ᴄʜᴀɴɢᴇ sᴇᴛᴛɪɴɢs ᴀs ʏᴏᴜʀ ᴡɪsʜ \"\"\"\r\n\r\n TELEGRAPH_TXT = \"\"\" Hᴇʟᴘ : <b>Tᴇʟᴇɢʀᴀᴘʜ</b>\r\n\r\n<b>Nᴏᴛᴇ</b>: ᴛʜɪꜱ ᴄᴏᴍᴍᴀɴᴅ ɪꜱ ᴀᴠᴀɪʟᴀʙʟᴇ ɪɴ ɢʀᴏᴜᴘꜱ ᴀɴᴅ ᴘᴍꜱ. ᴀʟꜱᴏ ᴄᴀɴ ʙᴇ ᴜꜱᴇ ʙʏ ᴇᴠᴇʀʏᴏɴᴇ.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs & Usᴀɢᴇ :</b>\r\n• /telegraph - sᴇɴᴅ ᴍᴇ ᴘɪᴄᴛᴜʀᴇ ᴏʀ ᴠɪᴅᴇᴏ ᴜɴᴅᴇʀ 𝟻ᴍʙ\"\"\"\r\n\r\n FONT_TXT = \"\"\"Hᴇʟᴘ : <b>Fᴏɴᴛ</b>\r\n\r\n<b>Nᴏᴛᴇ</b>: ʏᴏᴜ ᴄᴀɴ ᴜꜱᴇ ᴛʜɪꜱ ᴍᴏᴅᴇ ᴛᴏ ᴄʜᴀɴɢᴇ ʏᴏᴜʀ ꜰᴏɴᴛꜱ ꜱᴛʏʟᴇ, ᴊᴜꜱᴛ ꜱᴇɴᴅ ᴍᴇ ʟɪᴋᴇ ᴛʜɪꜱ ꜰᴏʀᴍᴀᴛ. \r\n\r\n<code>/font TG_LINKS_CHANNEL</code>\"\"\"\r\n\r\n MANUELFILTER_TXT = \"\"\"Hᴇʟᴘ : <b>Fɪʟᴛᴇʀꜱ</b>\r\n \r\n◈ ꜰɪʟᴛᴇʀ ɪꜱ ᴀ ꜰᴇᴀᴛᴜʀᴇ ᴡᴇʀᴇ ᴜꜱᴇʀꜱ ᴄᴀɴ ꜱᴇᴛ ᴀᴜᴛᴏᴍᴀᴛᴇᴅ ʀᴇᴘʟɪᴇꜱ ꜰᴏʀ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ᴋᴇʏᴡᴏʀᴅ ᴀɴᴅ ɪ ᴡɪʟʟ ʀᴇꜱᴘᴏɴᴅ ᴡʜᴇɴᴇᴠᴇʀ ᴀ ᴋᴇʏᴡᴏʀᴅ ɪꜱ ꜰᴏᴜɴᴅ ɪɴ ᴛʜᴇ ᴍᴇꜱꜱᴀɢᴇ.\r\n\r\n<b>Nᴏᴛᴇ :</b>\r\n1. ᴛʜɪꜱ ʙᴏᴛ ꜱʜᴏᴜʟᴅ ʜᴀᴠᴇ ᴀᴅᴍɪɴ ᴘʀɪᴠɪʟᴇɢᴇ.\r\n2. ᴏɴʟʏ ᴀᴅᴍɪɴꜱ ᴄᴀɴ ᴀᴅᴅ ꜰɪʟᴛᴇʀꜱ ɪɴ ᴀ ᴄʜᴀᴛ.\r\n3. 
ᴀʟᴇʀᴛ ʙᴜᴛᴛᴏɴꜱ ʜᴀᴠᴇ ᴀ ʟɪᴍɪᴛ ᴏꜰ 64 ᴄʜᴀʀᴀᴄᴛᴇʀꜱ.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /filter - ᴀᴅᴅ ᴀ ꜰɪʟᴛᴇʀ ɪɴ ᴀ ᴄʜᴀᴛ\r\n• /filters - ʟɪꜱᴛ ᴀʟʟ ᴛʜᴇ ꜰɪʟᴛᴇʀꜱ ᴏꜰ ᴀ ᴄʜᴀᴛ\r\n• /del - ᴅᴇʟᴇᴛᴇ ᴀ ꜱᴘᴇᴄɪꜰɪᴄ ꜰɪʟᴛᴇʀ ɪɴ ᴀ ᴄʜᴀᴛ\r\n• /delall - ᴅᴇʟᴇᴛᴇ ᴛʜᴇ ᴡʜᴏʟᴇ ꜰɪʟᴛᴇʀꜱ ɪɴ ᴀ ᴄʜᴀᴛ (ᴄʜᴀᴛ ᴏᴡɴᴇʀ ᴏɴʟʏ)\"\"\"\r\n\r\n BUTTON_TXT = \"\"\"Hᴇʟᴘ : <b>Bᴜᴛᴛᴏɴꜱ</b>\r\n \r\n◈ ᴛʜɪꜱ ʙᴏᴛ ꜱᴜᴘᴘᴏʀᴛꜱ ʙᴏᴛʜ ᴜʀʟ ᴀɴᴅ ᴀʟᴇʀᴛ ɪɴʟɪɴᴇ ʙᴜᴛᴛᴏɴꜱ.\r\n\r\n<b>Nᴏᴛᴇ :</b>\r\n𝟷. ᴛᴇʟᴇɢʀᴀᴍ ᴡɪʟʟ ɴᴏᴛ ᴀʟʟᴏᴡꜱ ʏᴏᴜ ᴛᴏ ꜱᴇɴᴅ ʙᴜᴛᴛᴏɴꜱ ᴡɪᴛʜᴏᴜᴛ ᴀɴʏ ᴄᴏɴᴛᴇɴᴛ, ꜱᴏ ᴄᴏɴᴛᴇɴᴛ ɪꜱ ᴍᴀɴᴅᴀᴛᴏʀʏ.\r\n𝟸. ᴛʜɪꜱ ʙᴏᴛ ꜱᴜᴘᴘᴏʀᴛꜱ ʙᴜᴛᴛᴏɴꜱ ᴡɪᴛʜ ᴀɴʏ ᴛᴇʟᴇɢʀᴀᴍ ᴍᴇᴅɪᴀ ᴛʏᴘᴇ.\r\n𝟹. ʙᴜᴛᴛᴏɴꜱ ꜱʜᴏᴜʟᴅ ʙᴇ ᴘʀᴏᴘᴇʀʟʏ ᴘᴀʀꜱᴇᴅ ᴀꜱ ᴍᴀʀᴋᴅᴏᴡɴ ꜰᴏʀᴍᴀᴛ\r\n\r\nᴜʀʟ ʙᴜᴛᴛᴏɴꜱ :\r\n<code>[Button Text](buttonurl:https://t.me/TG_LINKS_CHANNEL)</code>\r\n\r\nᴀʟᴇʀᴛ ʙᴜᴛᴛᴏɴꜱ :\r\n<code>[Button Text](buttonalert:ᴛʜɪꜱ ɪꜱ ᴀɴ ᴀʟᴇʀᴛ ᴍᴇꜱꜱᴀɢᴇ)</code>\"\"\"\r\n\r\n AUTOFILTER_TXT = \"\"\"Hᴇʟᴘ : <b>Aᴜᴛᴏ Fɪʟᴛᴇʀ</b>\r\n    \r\n<b>Nᴏᴛᴇ :</b> Fɪʟᴇ Iɴᴅᴇx\r\n𝟷. ᴍᴀᴋᴇ ᴍᴇ ᴛʜᴇ ᴀᴅᴍɪɴ ᴏꜰ ʏᴏᴜʀ ᴄʜᴀɴɴᴇʟ ɪꜰ ɪᴛ'ꜱ ᴘʀɪᴠᴀᴛᴇ.\r\n𝟸. ᴍᴀᴋᴇ ꜱᴜʀᴇ ᴛʜᴀᴛ ʏᴏᴜʀ ᴄʜᴀɴɴᴇʟ ᴅᴏᴇꜱ ɴᴏᴛ ᴄᴏɴᴛᴀɪɴꜱ ᴄᴀᴍʀɪᴘꜱ, ᴘᴏʀɴ ᴀɴᴅ ꜰᴀᴋᴇ ꜰɪʟᴇꜱ.\r\n𝟹. ꜰᴏʀᴡᴀʀᴅ ᴛʜᴇ ʟᴀꜱᴛ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴍᴇ ᴡɪᴛʜ ǫᴜᴏᴛᴇꜱ. ɪ'ʟʟ ᴀᴅᴅ ᴀʟʟ ᴛʜᴇ ꜰɪʟᴇꜱ ɪɴ ᴛʜᴀᴛ ᴄʜᴀɴɴᴇʟ ᴛᴏ ᴍʏ ᴅʙ.\r\n\r\n<b>Nᴏᴛᴇ :</b> Aᴜᴛᴏ Fɪʟᴛᴇʀ\r\n𝟷. Aᴅᴅ ᴛʜᴇ ʙᴏᴛ ᴀs ᴀᴅᴍɪɴ ᴏɴ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\r\n𝟸. Usᴇ /connect ᴀɴᴅ ᴄᴏɴɴᴇᴄᴛ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴛᴏ ᴛʜᴇ ʙᴏᴛ.\r\n𝟹. Usᴇ /settings ᴏɴ ʙᴏᴛ's ᴘᴍ ᴀɴᴅ ᴛᴜʀɴ ᴏɴ AᴜᴛᴏFɪʟᴛᴇʀ ᴏɴ ᴛʜᴇ sᴇᴛᴛɪɴɢs ᴍᴇɴᴜ.\"\"\"\r\n\r\n \r\n RULE_TXT = \"\"\"♦ 𝗚𝗿𝗼𝘂𝗽 𝗥𝘂𝗹𝗲𝘀 ♦\r\n\r\n◈ <b>Sᴇᴀʀᴄʜ Mᴏᴠɪᴇ Wɪᴛʜ Cᴏʀʀᴇᴄᴛ Sᴘᴇʟʟɪɴɢ:</b>\r\n• ᴀᴠᴀᴛᴀʀ 𝟸𝟶𝟶𝟿 ✅\r\n• ᴀᴠᴀᴛᴀʀ ʜɪɴᴅɪ ✅\r\n• ᴀᴠᴀᴛᴀʀ ᴍᴏᴠɪᴇ ❌\r\n• ᴀᴠᴀᴛᴀʀ ʜɪɴᴅɪ ᴅᴜʙʙᴇᴅ..❌\r\n\r\n◈ <b>Sᴇᴀʀᴄʜ Wᴇʙ Sᴇʀɪᴇs Iɴ ᴛʜɪs Fᴏʀᴍᴀᴛ:</b>\r\n• ᴠɪᴋɪɴɢs S𝟶𝟷 ✅\r\n• ᴠɪᴋɪɴɢs S𝟶𝟷E𝟶𝟷 ✅\r\n• ᴠɪᴋɪɴɢs S𝟶𝟷 ʜɪɴᴅɪ ✅\r\n• ᴠɪᴋɪɴɢs S𝟶𝟷 ʜɪɴᴅɪ ᴅᴜʙʙ... ❌\r\n• ᴠɪᴋɪɴɢs sᴇᴀsᴏɴ 𝟷 ❌\r\n• ᴠɪᴋɪɴɢs ᴡᴇʙ sᴇʀɪᴇs ❌\r\n\r\n<b>➙ ᴅᴏɴ'ᴛ ᴅᴏ ᴀɴʏ ꜱᴇʟꜰ ᴘʀᴏᴍᴏᴛɪᴏɴ. \r\n➙ ᴅᴏɴ'ᴛ ꜱᴇɴᴅ ᴀɴʏ ᴋɪɴᴅ ᴏꜰ ᴘʜᴏᴛᴏ, ᴠɪᴅᴇᴏ, ᴅᴏᴄᴜᴍᴇɴᴛꜱ, ᴜʀʟ, ᴇᴛᴄ...\r\n➙ ᴅᴏɴ'ᴛ ʀᴇǫᴜᴇꜱᴛ ᴀɴʏ ᴛʜɪɴɢꜱ ᴏᴛʜᴇʀ ᴛʜᴀɴ ᴍᴏᴠɪᴇꜱ, ꜱᴇʀɪᴇꜱ, ᴀɴɪᴍᴀᴛɪᴏɴ, ᴄᴀʀᴛᴏᴏɴ, ᴀɴɪᴍᴇ, ᴋ-ᴅʀᴀᴍᴀ ᴍᴀɴʏ ᴍᴏʀᴇ.</b>\r\n\r\n🔰 <b>Nᴏᴛᴇ :</b> ᴀʟʟ ᴍᴇꜱꜱᴀɢᴇꜱ ᴡɪʟʟ ʙᴇ ᴀᴜᴛᴏ-ᴅᴇʟᴇᴛᴇᴅ ᴀꜰᴛᴇʀ 𝟷𝟶 ᴍɪɴᴜᴛᴇꜱ ᴛᴏ ᴀᴠᴏɪᴅ ᴄᴏᴘʏʀɪɢʜᴛ ɪꜱꜱᴜᴇꜱ.\"\"\"\r\n\r\n CONNECTION_TXT = \"\"\"Hᴇʟᴘ : <b>Cᴏɴɴᴇᴄᴛɪᴏɴꜱ</b>\r\n \r\n◈ ᴜꜱᴇᴅ ᴛᴏ ᴄᴏɴɴᴇᴄᴛ ʙᴏᴛ ᴛᴏ ᴘᴍ ꜰᴏʀ ᴍᴀɴᴀɢɪɴɢ ꜰɪʟᴛᴇʀꜱ \r\n◈ ɪᴛ ʜᴇʟᴘꜱ ᴛᴏ ᴀᴠᴏɪᴅ ꜱᴘᴀᴍᴍɪɴɢ ɪɴ ɢʀᴏᴜᴘꜱ.\r\n\r\n<b>Nᴏᴛᴇ :</b>\r\n1. ᴏɴʟʏ ᴀᴅᴍɪɴꜱ ᴄᴀɴ ᴀᴅᴅ ᴀ ᴄᴏɴɴᴇᴄᴛɪᴏɴ.\r\n2. ꜱᴇɴᴅ /ᴄᴏɴɴᴇᴄᴛ ꜰᴏʀ ᴄᴏɴɴᴇᴄᴛɪɴɢ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ᴘᴍ\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /connect - ᴄᴏɴɴᴇᴄᴛ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ᴄʜᴀᴛ ᴛᴏ ʏᴏᴜʀ ᴘᴍ\r\n• /disconnect - ᴅɪꜱᴄᴏɴɴᴇᴄᴛ ꜰʀᴏᴍ ᴀ ᴄʜᴀᴛ\r\n• /connections - ʟɪꜱᴛ ᴀʟʟ ʏᴏᴜʀ ᴄᴏɴɴᴇᴄᴛɪᴏɴꜱ\"\"\"\r\n\r\n EXTRAMOD_TXT = \"\"\"Hᴇʟᴘ : <b>Exᴛʀᴀ Mᴏᴅᴜʟᴇs</b>\r\n \r\n<b>Nᴏᴛᴇ :</b>\r\nᴛʜᴇꜱᴇ ᴀʀᴇ ᴛʜᴇ ᴇxᴛʀᴀ ꜰᴇᴀᴛᴜʀᴇꜱ ᴏꜰ ᴛʜɪꜱ ʙᴏᴛ\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /id - ɢᴇᴛ ɪᴅ ᴏꜰ ᴀ ꜱᴘᴇᴄɪꜰɪᴇᴅ ᴜꜱᴇʀ.\r\n• /info - ɢᴇᴛ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ᴀʙᴏᴜᴛ ᴀ ᴜꜱᴇʀ.\r\n• /imdb - ɢᴇᴛ ᴛʜᴇ ꜰɪʟᴍ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ꜰʀᴏᴍ ɪᴍᴅʙ ꜱᴏᴜʀᴄᴇ.\r\n• /search - ɢᴇᴛ ᴛʜᴇ ꜰɪʟᴍ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ꜰʀᴏᴍ ᴠᴀʀɪᴏᴜꜱ ꜱᴏᴜʀᴄᴇꜱ.\"\"\"\r\n\r\n ADMIN_TXT = \"\"\"<b>Nᴏᴛᴇ :</b> Tʜɪs Mᴏᴅᴜʟᴇ Oɴʟʏ Wᴏʀᴋs Fᴏʀ Mʏ Aᴅᴍɪɴs.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /logs - ᴛᴏ ɢᴇᴛ ᴛʜᴇ ʀᴇᴄᴇɴᴛ ᴇʀʀᴏʀꜱ\r\n• /stats - ᴛᴏ ɢᴇᴛ ꜱᴛᴀᴛᴜꜱ ᴏꜰ ꜰɪʟᴇꜱ ɪɴ ᴅʙ. <b>[Tʜɪs Cᴏᴍᴍᴀɴᴅ Cᴀɴ Bᴇ Usᴇᴅ Bʏ Aɴʏᴏɴᴇ]</b>\r\n• /delete - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀ ꜱᴘᴇᴄɪꜰɪᴄ ꜰɪʟᴇ ꜰʀᴏᴍ ᴅʙ.\r\n• /users - ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴜꜱᴇʀꜱ ᴀɴᴅ ɪᴅꜱ.\r\n• /chats - ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴄʜᴀᴛꜱ ᴀɴᴅ ɪᴅꜱ\r\n• /leave - ᴛᴏ ʟᴇᴀᴠᴇ ꜰʀᴏᴍ ᴀ ᴄʜᴀᴛ.\r\n• /disable - ᴛᴏ ᴅɪꜱᴀʙʟᴇ ᴀ ᴄʜᴀᴛ.\r\n• /ban - ᴛᴏ ʙᴀɴ ᴀ ᴜꜱᴇʀ.\r\n• /unban - ᴛᴏ ᴜɴʙᴀɴ ᴀ ᴜꜱᴇʀ.\r\n• /channel - ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴛᴏᴛᴀʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ᴄʜᴀɴɴᴇʟꜱ. \r\n• /broadcast - ᴛᴏ ʙʀᴏᴀᴅᴄᴀꜱᴛ ᴀ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴀʟʟ ᴜꜱᴇʀꜱ. 
\r\n• /grp_broadcast - Tᴏ ʙʀᴏᴀᴅᴄᴀsᴛ ᴀ ᴍᴇssᴀɢᴇ ᴛᴏ ᴀʟʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ɢʀᴏᴜᴘs.\r\n• /gfilter - ᴛᴏ ᴀᴅᴅ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs. \r\n• /gfilters - ᴛᴏ ᴠɪᴇᴡ ʟɪsᴛ ᴏғ ᴀʟʟ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs. \r\n• /delg - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀ sᴘᴇᴄɪғɪᴄ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ. \r\n• /request - ᴛᴏ sᴇɴᴅ ᴀ ᴍᴏᴠɪᴇ/sᴇʀɪᴇs ʀᴇᴏ̨ᴜᴇsᴛ ᴛᴏ ʙᴏᴛ ᴀᴅᴍɪɴs. ᴏɴʟʏ ᴡᴏʀᴋs ᴏɴ sᴜᴘᴘᴏʀᴛ ɢʀᴏᴜᴘ. <b>[Tʜɪs Cᴏᴍᴍᴀɴᴅ Cᴀɴ Bᴇ Usᴇᴅ Bʏ Aɴʏᴏɴᴇ]</b>\r\n• /delallg - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀʟʟ ɢғɪʟᴛᴇʀs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.\r\n• /deletefiles - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴄᴀᴍʀɪᴘ ᴀɴᴅ ᴘʀᴇ-ᴅᴠᴅ ғɪʟᴇs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.\"\"\"\r\n\r\n STICKER_TXT = \"\"\"<b>yᴏᴜ ᴄᴀɴ ᴜꜱᴇ ᴛʜɪꜱ ᴍᴏᴅᴜʟᴇ ᴛᴏ ꜰɪɴᴅᴀɴy ꜱᴛɪᴄᴋᴇʀꜱ ɪᴅ.\r\n• ᴜꜱᴀɢᴇ :ᴛᴏ ɢᴇᴛ ꜱᴛɪᴄᴋᴇʀ\r\n \r\n⭕ ʜᴏᴡ ᴛᴏ ᴜꜱᴇ\r\n◉ Reply To Any Sticker [/stickerid]\r\n\r\n/𝐬𝐭𝐢𝐜𝐤𝐞𝐫𝐢𝐝 𝐬𝐭𝐢𝐜𝐤𝐞𝐫 𝐢𝐝\r\n\r\n</b>\"\"\"\r\n \r\n STATUS_TXT = \"\"\"<b>⍟─────[ <b>Bᴏᴛ Sᴛᴀᴛᴜs</b> ]─────⍟\r\n    \r\n★ ᴛᴏᴛᴀʟ ꜰɪʟᴇꜱ : <code>{}</code>\r\n★ ᴛᴏᴛᴀʟ ᴜꜱᴇʀꜱ : <code>{}</code>\r\n★ ᴛᴏᴛᴀʟ ɢʀᴏᴜᴘꜱ : <code>{}</code>\r\n★ ᴜꜱᴇᴅ ꜱᴛᴏʀᴀɢᴇ: <code>{}</code>\r\n★ ꜰʀᴇᴇ ꜱᴛᴏʀᴀɢᴇ : <code>{}</code>\r\n\r\n•❅──────✧❅✦❅✧──────❅•</b>\"\"\"\r\n\r\n\r\n LOG_TEXT_G = \"\"\"<b>#NewGroup\r\nGʀᴏᴜᴘ = {}(<code>{}</code>)\r\nTᴏᴛᴀʟ Mᴇᴍʙᴇʀs = <code>{}</code>\r\nAᴅᴅᴇᴅ Bʏ - {}</b>\"\"\"\r\n\r\n LOG_TEXT_P = \"\"\"<b>#NewUser\r\nID - <code>{}</code>\r\nNᴀᴍᴇ - {}</b>\"\"\"\r\n\r\n ALRT_TXT = \"\"\"<b>ʜᴇʟʟᴏ {},\r\nᴛʜɪꜱ ɪꜱ ɴᴏᴛ ʏᴏᴜʀ ᴍᴏᴠɪᴇ ʀᴇQᴜᴇꜱᴛ,\r\nʀᴇǫᴜᴇꜱᴛ ʏᴏᴜʀ'ꜱ...</b>\"\"\"\r\n\r\n OLD_ALRT_TXT = \"\"\"<b>ʜᴇʏ {},\r\nʏᴏᴜ ᴀʀᴇ ᴜꜱɪɴɢ ᴏɴᴇ ᴏꜰ ᴍʏ ᴏʟᴅ ᴍᴇꜱꜱᴀɢᴇꜱ, \r\nᴘʟᴇᴀꜱᴇ ꜱᴇɴᴅ ᴛʜᴇ ʀᴇǫᴜᴇꜱᴛ ᴀɢᴀɪɴ.</b>\"\"\"\r\n\r\n CUDNT_FND = \"\"\"<b>ɪ ᴄᴏᴜʟᴅɴ'ᴛ ꜰɪɴᴅ ᴀɴʏᴛʜɪɴɢ ʀᴇʟᴀᴛᴇᴅ ᴛᴏ {}\r\nᴅɪᴅ ʏᴏᴜ ᴍᴇᴀɴ ᴀɴʏ ᴏɴᴇ ᴏꜰ ᴛʜᴇꜱᴇ?</b>\"\"\"\r\n\r\n I_CUDNT = \"\"\"<b>sᴏʀʀʏ ɴᴏ ꜰɪʟᴇs ᴡᴇʀᴇ ꜰᴏᴜɴᴅ ꜰᴏʀ ʏᴏᴜʀ ʀᴇǫᴜᴇꜱᴛ {} 😕\r\n\r\nMᴏᴠɪᴇs Nᴏᴛ Aᴠᴀɪʟᴀʙʟᴇ Rᴇᴀsᴏɴ:\r\n𝟷. ᴏ.ᴛ.ᴛ ᴏʀ ᴅᴠᴅ ɴᴏᴛ ʀᴇʟᴇᴀsᴇᴅ\r\n𝟸. ᴛʏᴘᴇ ɴᴀᴍᴇ ᴡɪᴛʜ ʏᴇᴀʀ\r\n𝟹. ᴍᴏᴠɪᴇ ɪs ɴᴏᴛ ᴀᴠᴀɪʟᴀʙʟᴇ ɪɴ ᴛʜᴇ ᴅᴀᴛᴀʙᴀsᴇ ʀᴇᴘᴏʀᴛ ᴛᴏ ᴀᴅᴍɪɴs @TG_Bots_Supporter</b>\"\"\"\r\n\r\n I_CUD_NT = \"\"\"<b>ɪ ᴄᴏᴜʟᴅɴ'ᴛ ꜰɪɴᴅ ᴀɴʏ ᴍᴏᴠɪᴇ ʀᴇʟᴀᴛᴇᴅ ᴛᴏ {}.\r\nᴘʟᴇᴀꜱᴇ ᴄʜᴇᴄᴋ ᴛʜᴇ ꜱᴘᴇʟʟɪɴɢ ᴏɴ ɢᴏᴏɢʟᴇ ᴏʀ ɪᴍᴅʙ...</b>\"\"\"\r\n\r\n MVE_NT_FND = \"\"\"<b>ᴍᴏᴠɪᴇ ɴᴏᴛ ꜰᴏᴜɴᴅ ɪɴ ᴅᴀᴛᴀʙᴀꜱᴇ...</b>\"\"\"\r\n\r\n TOP_ALRT_MSG = \"\"\"<b>Cʜᴇᴄᴋɪɴɢ Fᴏʀ Mᴏᴠɪᴇ Iɴ Dᴀᴛᴀʙᴀsᴇ...</b>\"\"\"\r\n\r\n MELCOW_ENG = \"\"\"<b>Hᴇʟʟᴏ {} 😍, Aɴᴅ Wᴇʟᴄᴏᴍᴇ Tᴏ {} Gʀᴏᴜᴘ ❤️\r\n\r\n➻ ʜᴇʀᴇ ʏᴏᴜ ᴄᴀɴ ꜱᴇᴀʀᴄʜ ʏᴏᴜʀ ꜰᴀᴠᴏᴜʀɪᴛᴇ ᴍᴏᴠɪᴇꜱ ᴏʀ ꜱᴇʀɪᴇꜱ ʙʏ ᴊᴜꜱᴛ ᴛʏᴘɪɴɢ ɪᴛ'ꜱ ɴᴀᴍᴇ. 
\r\n\r\n⚠️ ɪꜰ ʏᴏᴜ ᴀʀᴇ ʜᴀᴠɪɴɢ ᴀɴʏ ᴘʀᴏʙʟᴇᴍ ʀᴇɢᴀʀᴅɪɴɢ ᴅᴏᴡɴʟᴏᴀᴅɪɴɢ ᴏʀ ꜱᴏᴍᴇᴛʜɪɴɢ ᴇʟꜱᴇ ᴛʜᴇɴ ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 👇</b>\"\"\"\r\n \r\n REQINFO = \"\"\"\r\n⚠ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ⚠\r\n\r\nᴀꜰᴛᴇʀ 5 ᴍɪɴᴜᴛᴇꜱ ᴛʜɪꜱ ᴍᴇꜱꜱᴀɢᴇ ᴡɪʟʟ ʙᴇ ᴀᴜᴛᴏᴍᴀᴛɪᴄᴀʟʟʏ ᴅᴇʟᴇᴛᴇᴅ\r\n\r\nɪꜰ ʏᴏᴜ ᴅᴏ ɴᴏᴛ ꜱᴇᴇ ᴛʜᴇ ʀᴇǫᴜᴇsᴛᴇᴅ ᴍᴏᴠɪᴇ / sᴇʀɪᴇs ꜰɪʟᴇ, ʟᴏᴏᴋ ᴀᴛ ᴛʜᴇ ɴᴇxᴛ ᴘᴀɢᴇ\"\"\"\r\n\r\n \r\n\r\n SINFO = \"\"\"\r\n⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯\r\nꜱᴇʀɪᴇꜱ ʀᴇǫᴜᴇꜱᴛ ꜰᴏʀᴍᴀᴛ\r\n⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯\r\n\r\nɢᴏ ᴛᴏ ɢᴏᴏɢʟᴇ ➠ ᴛʏᴘᴇ ꜱᴇʀɪᴇꜱ ɴᴀᴍᴇ ➠ ᴄᴏᴘʏ ᴄᴏʀʀᴇᴄᴛ ɴᴀᴍᴇ ➠ ᴘᴀꜱᴛᴇ ᴛʜɪꜱ ɢʀᴏᴜᴘ\r\n\r\nᴇxᴀᴍᴘʟᴇ : Loki S01E01\r\n\r\n🚯 ᴅᴏɴᴛ ᴜꜱᴇ ➠ ':(!,./)\"\"\"\r\n\r\n NORSLTS = \"\"\"\r\n★ #𝗡𝗼𝗥𝗲𝘀𝘂𝗹𝘁𝘀 ★\r\n\r\n𝗜𝗗 <b>: {}</b>\r\n\r\n𝗡𝗮𝗺𝗲 <b>: {}</b>\r\n\r\n𝗠𝗲𝘀𝘀𝗮𝗴𝗲 <b>: {}</b>🥲\"\"\"\r\n\r\n CAPTION = \"\"\" \r\n🗂 𝗙𝗶𝗹𝗲: <b><font class=smcp>{file_name}</font></b>\r\n📀 𝗦𝗶𝘇𝗲: <b><font class=smcp>{file_size}</font></b>\r\n\r\n<b>🔰 Cʀᴇᴀᴛᴏʀ : <a href=\"https://t.me/KUSHALHK\">𝐊𝐔𝐒𝐇𝐀𝐋</a>\r\n🔰 Cʜᴀɴɴᴇʟ : <a href=\"https://t.me/TG_LINKS_CHANNEL\">𝐌𝐎𝐕𝐈𝐄𝐒 𝐂𝐇𝐀𝐍𝐍𝐄𝐋</a>\r\n🔰 Gʀᴏᴜᴘ : <a href=\"https://t.me/movies_hub_official1\">𝐌𝐎𝐕𝐈𝐄 𝐑𝐄𝐐𝐔𝐄𝐒𝐓 𝐆𝐑𝐎𝐔𝐏</a></b>\"\"\"\r\n \r\n IMDB_TEMPLATE_TXT = \"\"\"\r\n<b>Query: {query}\r\nIMDb Data:\r\n\r\n🧿 𝐓𝐈𝐓𝐋𝐄: <a href={url}>{title}</a>\r\n🎭 𝐆𝐄𝐍𝐑𝐄𝐒: {genres}\r\n📆 𝐘𝐄𝐀𝐑: <a href={url}/releaseinfo>{year}</a>\r\n🌟 𝐑𝐀𝐓𝐈𝐍𝐆: <a href={url}/ratings>{rating}</a> / 10 (Based on {votes} user ratings)</b>\r\n☀️ 𝐋𝐀𝐍𝐆𝐔𝐀𝐆𝐄 : <code>{languages}</code></a>\r\n📀 𝐑𝐔𝐍𝐓𝐈𝐌𝐄: {runtime} Minutes</a>\r\n\r\n<b>👨‍💼 Requested by : {message.from_user.mention}</b>\"\"\"\r\n\r\n \r\n ALL_FILTERS = \"\"\"\r\n<b>Hᴇʏ {}, Tʜᴇsᴇ ᴀʀᴇ ᴍʏ ᴛʜʀᴇᴇ ᴛʏᴘᴇs ᴏғ ғɪʟᴛᴇʀs.</b>\"\"\"\r\n \r\n GFILTER_TXT = \"\"\"Hᴇʟᴘ : <b>Gʟᴏʙᴀʟ Fɪʟᴛᴇʀs</b>\r\n \r\n◈ Gʟᴏʙᴀʟ Fɪʟᴛᴇʀs ᴀʀᴇ ᴛʜᴇ ғɪʟᴛᴇʀs sᴇᴛ ʙʏ ʙᴏᴛ ᴀᴅᴍɪɴs ᴡʜɪᴄʜ ᴡɪʟʟ ᴡᴏʀᴋ ᴏɴ ᴀʟʟ ɢʀᴏᴜᴘs.\r\n \r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /gfilter - Tᴏ ᴄʀᴇᴀᴛᴇ ᴀ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ.\r\n• /gfilters - Tᴏ ᴠɪᴇᴡ ᴀʟʟ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs.\r\n• /delg - Tᴏ ᴅᴇʟᴇᴛᴇ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ.\r\n• /delallg - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀʟʟ ɢʟᴏʙᴀʟ ꜰɪʟᴛᴇʀꜱ.\"\"\"\r\n \r\n FILE_STORE_TXT = \"\"\"Hᴇʟᴘ : <b>Fɪʟᴇ Sᴛᴏʀᴇ</b>\r\n \r\n◈ Fɪʟᴇ sᴛᴏʀᴇ ɪs ᴛʜᴇ ғᴇᴀᴛᴜʀᴇ ᴡʜɪᴄʜ ᴡɪʟʟ ᴄʀᴇᴀᴛᴇ ᴀ sʜᴀʀᴇᴀʙʟᴇ ʟɪɴᴋ ᴏғ ᴀ sɪɴɢʟᴇ ᴏʀ ᴍᴜʟᴛɪᴘʟᴇ ғɪʟᴇs.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /batch - ᴛᴏ ᴄʀᴇᴀᴛᴇ ᴀ ʙᴀᴛᴄʜ ʟɪɴᴋ ᴏғ ᴍᴜʟᴛɪᴘʟᴇ ғɪʟᴇs.\r\n• /link - ᴛᴏ ᴄʀᴇᴀᴛᴇ ᴀ sɪɴɢʟᴇ ғɪʟᴇ sᴛᴏʀᴇ ʟɪɴᴋ.\r\n• /pbatch - ᴊᴜsᴛ ʟɪᴋᴇ <code>/batch</code>, ʙᴜᴛ ᴛʜᴇ ғɪʟᴇs ᴡɪʟʟ ʙᴇ sᴇɴᴅ ᴡɪᴛʜ ғᴏʀᴡᴀʀᴅ ʀᴇsᴛʀɪᴄᴛɪᴏɴs.\r\n• /plink - ᴊᴜsᴛ ʟɪᴋᴇ <code>/link</code>, ʙᴜᴛ ᴛʜᴇ ғɪʟᴇ ᴡɪʟʟ ʙᴇ sᴇɴᴅ ᴡɪᴛʜ ғᴏʀᴡᴀʀᴅ ʀᴇsᴛʀɪᴄᴛɪᴏɴ.\"\"\"\r\n\r\n CHECK_TXT = \"\"\"\r\n<b>🔥 ᴄʜᴏᴏsᴇ ʏᴏᴜʀ sᴜɪᴛᴀʙʟᴇ ᴘʟᴀɴ ᴀɴᴅ ᴘᴀʏ ʏᴏᴜʀ ᴘʀᴇᴍɪᴜᴍ ғᴇᴇs ᴜsɪɴɢ ᴀɴʏ ᴜᴘɪ ᴀᴘᴘ. \r\n\r\nᴘʟᴀɴ ᴀ : 𝟷 ᴡᴇᴇᴋ / ₹𝟷𝟻\r\nᴘʟᴀɴ ʙ : 𝟷 ᴍᴏɴᴛʜ / ₹𝟹𝟿\r\nᴘʟᴀɴ ᴄ : 𝟷 ʏᴇᴀʀ / ₹𝟹𝟼𝟶\r\n\r\n➻ ᴜᴘɪ ɪᴅ : harikushal234@paytm\r\n\r\n‼️ ᴍᴜsᴛ sᴇɴᴅ sᴄʀᴇᴇɴsʜᴏᴛ ᴀғᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ ᴀɴᴅ ɢɪᴠᴇ ᴍᴇ sᴏᴍᴇ ᴛɪᴍᴇ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴛʜᴇ ᴘʀᴇᴍɪᴜᴍ ʟɪsᴛ.</b>\"\"\"\r\n\r\n PLAN1_TXT = \"\"\"\r\n<b>🔥 ᴘᴀʏ ʏᴏᴜʀ ᴘʀᴇᴍɪᴜᴍ ᴘʟᴀɴ ғᴇᴇs ₹𝟷𝟻 ғᴏʀ 𝟷 ᴡᴇᴇᴋ ᴘʀᴇᴍɪᴜᴍ ᴀᴄᴄᴇss ᴡɪᴛʜ ᴀᴅ-ғʀᴇᴇ ᴇxᴘᴇʀɪᴇɴᴄᴇ ᴀɴᴅ ᴍᴀɴʏ ᴍᴏʀᴇ. \r\n\r\n➻ ᴜᴘɪ ɪᴅ : harikushal234@paytm\r\n\r\n‼️ ᴍᴜsᴛ sᴇɴᴅ sᴄʀᴇᴇɴsʜᴏᴛ ᴀғᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ ᴀɴᴅ ɢɪᴠᴇ ᴍᴇ sᴏᴍᴇ ᴛɪᴍᴇ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴛʜᴇ ᴘʀᴇᴍɪᴜᴍ ʟɪsᴛ.</b>\"\"\"\r\n\r\n PLAN2_TXT = \"\"\"\r\n<b>🔥 ᴘᴀʏ ʏᴏᴜʀ ᴘʀᴇᴍɪᴜᴍ ᴘʟᴀɴ ғᴇᴇs ₹𝟹𝟿 ғᴏʀ 𝟷 ᴍᴏɴᴛʜ ᴘʀᴇᴍɪᴜᴍ ᴀᴄᴄᴇss ᴡɪᴛʜ ᴀᴅ-ғʀᴇᴇ ᴇxᴘᴇʀɪᴇɴᴄᴇ ᴀɴᴅ ᴍᴀɴʏ ᴍᴏʀᴇ. \r\n\r\n➻ ᴜᴘɪ ɪᴅ : harikushal234@paytm\r\n\r\n‼️ ᴍᴜsᴛ sᴇɴᴅ sᴄʀᴇᴇɴsʜᴏᴛ ᴀғᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ ᴀɴᴅ ɢɪᴠᴇ ᴍᴇ sᴏᴍᴇ ᴛɪᴍᴇ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴛʜᴇ ᴘʀᴇᴍɪᴜᴍ ʟɪsᴛ.</b>\"\"\"\r\n\r\n PLAN3_TXT = \"\"\"\r\n<b>🔥 ᴘᴀʏ ʏᴏᴜʀ ᴘʀᴇᴍɪᴜᴍ ᴘʟᴀɴ ғᴇᴇs ₹𝟹𝟼𝟶 ғᴏʀ 𝟷 ʏᴇᴀʀ ᴘʀᴇᴍɪᴜᴍ ᴀᴄᴄᴇss ᴡɪᴛʜ ᴀᴅ-ғʀᴇᴇ ᴇxᴘᴇʀɪᴇɴᴄᴇ ᴀɴᴅ ᴍᴀɴʏ ᴍᴏʀᴇ. 
\r\n\r\n➻ ᴜᴘɪ ɪᴅ : harikushal234@paytm\r\n\r\n‼️ ᴍᴜsᴛ sᴇɴᴅ sᴄʀᴇᴇɴsʜᴏᴛ ᴀғᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ ᴀɴᴅ ɢɪᴠᴇ ᴍᴇ sᴏᴍᴇ ᴛɪᴍᴇ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴛʜᴇ ᴘʀᴇᴍɪᴜᴍ ʟɪsᴛ.</b>\"\"\"\r\n\r\n RESTART_TXT = \"\"\"\r\n<b>Bᴏᴛ Rᴇsᴛᴀʀᴛᴇᴅ !\r\n\r\n📅 Dᴀᴛᴇ : <code>{}</code>\r\n⏰ Tɪᴍᴇ : <code>{}</code>\r\n🌐 Tɪᴍᴇᴢᴏɴᴇ : <code>Asia/Kolkata</code>\r\n🛠️ Bᴜɪʟᴅ Sᴛᴀᴛᴜs: <code>ᴠ𝟹.𝟶 [ Sᴛᴀʙʟᴇ ]</code></b>\"\"\"\r\n\r\n LOGO = \"\"\"\r\n ____ ___ ____ __ ____ ____ \r\n(_ _)/ __) ( _ \\ / \\(_ _)(__ )\r\n )( ( (_ \\ ) _ (( O ) )( / _/ \r\n (__) \\___/ (____/ \\__/ (__) (____)\"\"\"\r" } ]
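As a quick orientation for the `get_size` helper included in this record's context (it formats a byte count into a human-readable string), a small usage sketch, assuming the repository's `utils` module is importable:

from utils import get_size  # helper shown in the context above

print(get_size(1536))            # "1.50 KB"
print(get_size(10 * 1024 ** 2))  # "10.00 MB"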
from pyrogram import Client, filters, enums
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery
from pyrogram.errors.exceptions.bad_request_400 import MessageTooLong, PeerIdInvalid
from info import ADMINS, LOG_CHANNEL, SUPPORT_CHAT, MELCOW_NEW_USERS, MELCOW_VID, CHNL_LNK, GRP_LNK
from database.users_chats_db import db
from database.ia_filterdb import Media
from utils import get_size, temp, get_settings
from Script import script
from pyrogram.errors import ChatAdminRequired
import asyncio
19,306
"""-----------------------------------------https://t.me/TG_LINKS_CHANNEL--------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members]
"""-----------------------------------------https://t.me/TG_LINKS_CHANNEL--------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members]
if temp.ME in r_j_check:
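The `next_line` above (`if temp.ME in r_j_check:`) branches on whether the bot itself is among the newly added members. Purely as a generic illustration of that pattern, and not the repository's actual continuation, a minimal handler could look like the sketch below (the function name `on_new_members` and the message texts are made up; `await bot.get_me()` stands in for the cached `temp.ME`):

from pyrogram import Client, filters

@Client.on_message(filters.new_chat_members & filters.group)
async def on_new_members(bot, message):
    new_ids = [u.id for u in message.new_chat_members]
    me = (await bot.get_me()).id          # stand-in for the cached temp.ME
    if me in new_ids:
        # the bot was just added to this group: greet the group / log the event
        await bot.send_message(message.chat.id, "Thanks for adding me to this group!")
    else:
        # ordinary users joined: send each one a welcome message
        for user in message.new_chat_members:
            await message.reply_text(f"Welcome, {user.mention}!")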
10
2023-11-03 12:21:26+00:00
24k
apple/ml-reed
reed/algorithms/pebble.py
[ { "identifier": "utils", "path": "BPref/utils.py", "snippet": "def make_env(cfg):\ndef ppo_make_env(env_id, seed):\ndef tie_weights(src, trg):\ndef make_metaworld_env(cfg):\ndef ppo_make_metaworld_env(env_id, seed):\n def __init__(self, *models):\n def __enter__(self):\n def __exit__(self, *args):\n def __init__(self, *models):\n def __enter__(self):\n def __exit__(self, *args):\ndef soft_update_params(net, target_net, tau):\ndef set_seed_everywhere(seed):\ndef make_dir(*path_parts):\ndef weight_init(m):\n def __init__(self,\n input_dim,\n hidden_dim,\n output_dim,\n hidden_depth,\n output_mod=None):\n def forward(self, x):\n def __init__(self, cache_size=1):\n def atanh(x):\n def __eq__(self, other):\n def _call(self, x):\n def _inverse(self, y):\n def log_abs_det_jacobian(self, x, y):\n def __init__(self, loc, scale):\n def mean(self):\n def __init__(self, epsilon=1e-4, shape=(), device=None):\n def update(self, x):\n def update_from_moments(self, batch_mean, batch_var, batch_count):\n def std(self):\ndef update_mean_var_count_from_moments(\n mean, var, count, batch_mean, batch_var, batch_count\n):\ndef mlp(input_dim, hidden_dim, output_dim, hidden_depth, output_mod=None):\ndef to_np(t):\nclass eval_mode(object):\nclass train_mode(object):\nclass MLP(nn.Module):\nclass TanhTransform(pyd.transforms.Transform):\nclass SquashedNormal(pyd.transformed_distribution.TransformedDistribution):\nclass TorchRunningMeanStd:\n M2 = m_a + m_b + torch.pow(delta, 2) * count * batch_count / tot_count" }, { "identifier": "Logger", "path": "BPref/logger.py", "snippet": "class Logger(object):\n def __init__(self,\n log_dir,\n save_tb=False,\n log_frequency=10000,\n agent='sac'):\n self._log_dir = log_dir\n self._log_frequency = log_frequency\n if save_tb:\n tb_dir = os.path.join(log_dir, 'tb')\n if os.path.exists(tb_dir):\n try:\n shutil.rmtree(tb_dir)\n except:\n print(\"logger.py warning: Unable to remove tb directory\")\n pass\n self._sw = SummaryWriter(tb_dir)\n else:\n self._sw = None\n # each agent has specific output format for training\n assert agent in AGENT_TRAIN_FORMAT\n train_format = COMMON_TRAIN_FORMAT + AGENT_TRAIN_FORMAT[agent]\n self._train_mg = MetersGroup(os.path.join(log_dir, 'train'),\n formating=train_format)\n self._eval_mg = MetersGroup(os.path.join(log_dir, 'eval'),\n formating=COMMON_EVAL_FORMAT)\n\n def _should_log(self, step, log_frequency):\n log_frequency = log_frequency or self._log_frequency\n return step % log_frequency == 0\n\n def _try_sw_log(self, key, value, step):\n if self._sw is not None:\n self._sw.add_scalar(key, value, step)\n\n def _try_sw_log_video(self, key, frames, step):\n if self._sw is not None:\n frames = torch.from_numpy(np.array(frames))\n frames = frames.unsqueeze(0)\n self._sw.add_video(key, frames, step, fps=30)\n\n def _try_sw_log_histogram(self, key, histogram, step):\n if self._sw is not None:\n self._sw.add_histogram(key, histogram, step)\n\n def log(self, key, value, step, n=1, log_frequency=1):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n if type(value) == torch.Tensor:\n value = value.item()\n self._try_sw_log(key, value / n, step)\n mg = self._train_mg if key.startswith('train') else self._eval_mg\n mg.log(key, value, n)\n\n def log_param(self, key, param, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n self.log_histogram(key + '_w', param.weight.data, step)\n if hasattr(param.weight, 'grad') and param.weight.grad is not None:\n 
self.log_histogram(key + '_w_g', param.weight.grad.data, step)\n if hasattr(param, 'bias') and hasattr(param.bias, 'data'):\n self.log_histogram(key + '_b', param.bias.data, step)\n if hasattr(param.bias, 'grad') and param.bias.grad is not None:\n self.log_histogram(key + '_b_g', param.bias.grad.data, step)\n\n def log_video(self, key, frames, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n self._try_sw_log_video(key, frames, step)\n\n def log_histogram(self, key, histogram, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n self._try_sw_log_histogram(key, histogram, step)\n\n def dump(self, step, save=True, ty=None):\n if ty is None:\n self._train_mg.dump(step, 'train', save)\n self._eval_mg.dump(step, 'eval', save)\n elif ty == 'eval':\n self._eval_mg.dump(step, 'eval', save)\n elif ty == 'train':\n self._train_mg.dump(step, 'train', save)\n else:\n raise f'invalid log type: {ty}'" }, { "identifier": "TrajectoryReplayBuffer", "path": "BPref/replay_buffer.py", "snippet": "class TrajectoryReplayBuffer:\n \"\"\"\n Buffer to store trajectories of environment transitions. Unlike ReplayBuffer, which stores all transitions in a\n flat manner, transitions are sorted by trajectory. Each trajectory corresponds to an episode.\n \"\"\"\n _RELABEL_BATCH_SIZE = 256\n\n def __init__(self, capacity: int, device: torch.device, window: int = 1, num_envs: t.Optional[int] = None,\n image_observations: t.Optional[t.Union[int, np.ndarray]] = None):\n \"\"\"\n Args:\n capacity: the number of trajectories to hold in memory\n device: the device sampled transitions should be put on\n window: no idea - part of the original code and is used in add_batch(...) which has not yet been refactored\n num_envs: the number of environment instances used to train the policy. Only needs to be specified when the\n number is >1. Some algorithms train on multiple instances of an environment at once, e.g. PPO.\n Not currently used, but not yet removed because we have not tested with an algorithm that needs\n multiple environment instances.\n image_observations: (default = false) whether to collect image observations in addition to state\n observations. 
This is helpful to use when the policy is trained on the state, but you\n want to visualize the trajectories or the reward model is trained on images.\n\n \"\"\"\n self.capacity = capacity\n self.device = device\n\n self.observations: t.Optional[np.ndarray] = None\n self.actions: t.Optional[np.ndarray] = None\n self.rewards: t.Optional[np.ndarray] = None\n self.not_dones: t.Optional[np.ndarray] = None\n self.not_dones_no_max: t.Optional[np.ndarray] = None\n self.trajectory_lengths: t.List = []\n self.window = window\n self.env_rewards: t.Optional[np.ndarray] = None\n self.image_observations: t.Optional[np.ndarray] = None\n # track whether to collect image observations - when not None, specifies the dimensions of the images\n self._collect_image_observations = image_observations\n\n # track the trajectories as a list of Trajectory\n self.trajectories: t.List[Trajectory] = []\n\n self.idx = 0\n self.last_save = 0\n self.full = False\n\n def __len__(self):\n return np.sum(self.trajectory_lengths) - len(self.trajectory_lengths)\n\n def __getitem__(self, flat_indx: t.Union[int, t.Tuple[int, int], t.List[int]]) -> TRANSITION:\n \"\"\"\n Get the transition at the given index\n\n Args:\n flat_indx: the index assuming transitions are stored flat instead of nested in trajectories\n - when an integer is specified, a single transition is retrieved\n - when a tuple of integers is given, a slice is retrieved as if the transitions are stored flat\n\n Returns:\n current observation\n action\n reward\n next observation\n whether the episode ended\n whether the episode ended without reaching max steps\n image version of current observation (optional)\n \"\"\"\n if isinstance(flat_indx, int) or isinstance(flat_indx, np.int64):\n traj_indx, trans_indx = self._flat_indx_to_trajectory_index(flat_indx)\n # check we are grabbing from a trajectory currently being accumulated\n # When the done signal is given, the current trajectory being accumulated is converted to a trajectory,\n # is added to the list of trajectories, and the values used to accumulate the next trajectory are set to\n # done. The next trajectory is not started until the call to add(...) after the done signal is received.\n # Therefore, we need to check whether the trajectory to pull from is actually the last completed trajectory\n # prior to starting a new trajectory. 
This is why we compare the length of the lists containing trajectory\n # lengths and the list containing the trajectories.\n if (traj_indx == len(self.trajectory_lengths) - 1\n and len(self.trajectory_lengths) > len(self.trajectories)):\n # we need to grab from the trajectory currently being populated\n return (self.observations[trans_indx].astype(np.float32), self.actions[trans_indx].astype(np.float32),\n self.rewards[trans_indx].astype(np.float32), self.observations[trans_indx + 1].astype(np.float32),\n self.not_dones[trans_indx].astype(np.float32),\n self.not_dones_no_max[trans_indx].astype(np.float32),\n (self.env_rewards[trans_indx].astype(np.float32)\n if self.env_rewards is not None\n else None),\n ((self.image_observations[trans_indx].astype(np.float32))\n if self.image_observations is not None\n else None),\n ((self.image_observations[trans_indx+1].astype(np.float32))\n if self.image_observations is not None\n else None))\n else:\n # grab from a previously completed trajectory\n transition: Transition = self.trajectories[traj_indx][trans_indx]\n return (transition.observation.astype(np.float32), transition.action.astype(np.float32),\n transition.reward.astype(np.float32), transition.next_observation.astype(np.float32),\n transition.not_done.astype(np.float32), transition.not_done_no_max.astype(np.float32),\n transition.env_reward.astype(np.float32),\n (transition.image_observation.astype(np.float32)\n if transition.image_observation is not None\n else None),\n (transition.next_image_observation.astype(np.float32)\n if transition.next_image_observation is not None\n else None))\n elif isinstance(flat_indx, t.List):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n not_dones = []\n not_dones_no_max = []\n env_rewards = []\n image_observations = []\n next_image_observations = []\n for indx in flat_indx:\n observation, action, reward, next_observation, not_done, not_done_no_max, env_reward, image_observation, next_image_observation = self[indx]\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n next_observations.append(next_observation)\n not_dones.append(not_done)\n not_dones_no_max.append(not_done_no_max)\n if env_reward is not None:\n env_rewards.append(env_reward)\n if image_observation is not None:\n image_observations.append(image_observation)\n if next_image_observation is not None:\n next_image_observations.append(next_image_observation)\n return (np.asarray(observations, dtype=np.float32), np.asarray(actions, dtype=np.float32),\n np.asarray(rewards, dtype=np.float32), np.asarray(next_observations, dtype=np.float32),\n np.asarray(not_dones, dtype=np.float32), np.asarray(not_dones_no_max, dtype=np.float32),\n (np.asarray(env_rewards, dtype=np.float32) if len(env_rewards) > 0 else None),\n (np.asarray(image_observations, dtype=np.float32) if self._collect_image_observations else None),\n (np.asarray(next_image_observations, dtype=np.float32) if self._collect_image_observations else None))\n else:\n # get the locations of the start and end transitions\n start_traj_indx, start_trans_indx = self._flat_indx_to_trajectory_index(flat_indx[0])\n end_traj_indx, end_trans_indx = self._flat_indx_to_trajectory_index(flat_indx[1])\n # check that we are not spanning trajectories\n if start_traj_indx == end_traj_indx:\n # grab the sub-trajectory\n sub_trajectory = self.trajectories[start_traj_indx][tuple((start_trans_indx, end_trans_indx))]\n else:\n # grab what remains of the trajectory\n end_trans_indx = 
len(self.trajectories[start_traj_indx]) - 1\n sub_trajectory = self.trajectories[start_traj_indx][tuple((start_trans_indx, end_trans_indx))]\n return (sub_trajectory.initial_observations,\n sub_trajectory.actions,\n sub_trajectory.rewards,\n sub_trajectory.next_observations,\n sub_trajectory.not_dones,\n sub_trajectory.not_dones_no_max,\n sub_trajectory.env_rewards,\n (sub_trajectory.initial_image_observations\n if sub_trajectory.initial_image_observations is not None\n else None),\n (sub_trajectory.next_image_observations\n if sub_trajectory.next_image_observations is not None\n else None))\n\n @property\n def trajectory_count(self) -> int:\n \"\"\"\n The number of trajectories in the buffer\n \"\"\"\n return len(self.trajectories)\n\n @property\n def all_not_dones(self) -> np.ndarray:\n \"\"\"\n Rewards from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.not_dones, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_rewards(self) -> np.ndarray:\n \"\"\"\n Rewards from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.rewards, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_environment_rewards(self) -> np.ndarray:\n \"\"\"\n Environment rewards from all trajectories and all transitions\n \"\"\"\n return np.concatenate([np.expand_dims(traj.rewards, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_initial_image_observations(self) -> np.ndarray:\n \"\"\"\n Image observations from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.initial_image_observations, axis=0)\n for traj in self.trajectories],\n axis=0)\n\n @property\n def all_next_image_observations(self) -> np.ndarray:\n \"\"\"\n Image observations from the state-action pairs from all trajectories and all transitions,\n\n The result of a transition\n \"\"\"\n return np.concatenate([np.expand_dims(traj.next_image_observations, axis=0)\n for traj in self.trajectories],\n axis=0)\n\n @property\n def all_initial_observations(self) -> np.ndarray:\n \"\"\"\n observations from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.initial_observations, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_next_observations(self) -> np.ndarray:\n \"\"\"\n Observations from the state-action pairs from all trajectories and all transitions\n\n The result of a transition\n \"\"\"\n return np.concatenate([np.expand_dims(traj.next_observations, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_actions(self) -> np.ndarray:\n \"\"\"\n Actions from the state-action pairs from all trajectories and all transitions\n \"\"\"\n return np.concatenate([np.expand_dims(traj.actions, axis=0) for traj in self.trajectories], axis=0)\n\n def _flat_indx_to_trajectory_index(self, flat_indx: int) -> t.Tuple[int, int]:\n \"\"\"\n Converts an index that assumes the transitions are flat to a trajectory and transition (w/in trajectory) index\n\n Args:\n flat_indx: the index assuming transitions are stored flat\n\n Returns:\n the index of the trajectory containing the transition\n the index of the transition within the trajectory\n 
\"\"\"\n # need to figure out which transition indices are stored in which trajectories\n transition_cumulative_sum = np.cumsum(self.trajectory_lengths)\n # the trajectory containing the transition is at the first index where the cumulative sum of transitions is\n # less than the transition index\n target_trajectory_indx = int(np.argmax(flat_indx < transition_cumulative_sum))\n # get the transition's index within the trajectory as the different between the flat index and the cumulative\n # sum at the previous trajectory - tells us how far into the target trajectory the transition is\n if target_trajectory_indx == 0:\n transition_trajectory_indx = flat_indx\n else:\n transition_trajectory_indx = flat_indx - transition_cumulative_sum[target_trajectory_indx - 1]\n return target_trajectory_indx, transition_trajectory_indx\n\n def _add_transition(self, observation: np.ndarray, action: np.ndarray, reward: float, done: t.Union[float, bool],\n done_no_max: t.Union[float, bool],\n env_reward: t.Optional[float] = None, image_observations: t.Optional[np.ndarray] = None):\n \"\"\"\n Track the transition and update the length of the trajectory currently being accumulated\n\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observations: (optional) image-based observation -> should not be given is observations is also an image. This\n should be used when you want to accumulate images separately from policy training.\n \"\"\"\n self.observations = np.concatenate([self.observations, np.expand_dims(observation, axis=0)], axis=0)\n self.actions = np.concatenate([self.actions, np.expand_dims(action, axis=0)], axis=0)\n self.rewards = np.concatenate([self.rewards, np.asarray(reward).reshape(1, 1)], axis=0)\n if type(done) is float:\n self.not_dones = np.concatenate([self.not_dones,\n np.asarray(not done, dtype=np.float32).reshape(1, 1)], axis=0)\n self.not_dones_no_max = np.concatenate([self.not_dones_no_max,\n np.asarray(not done_no_max, dtype=np.float32).reshape(1, 1)],\n axis=0)\n else:\n self.not_dones = np.concatenate([self.not_dones,\n np.asarray(~done, dtype=np.float32).reshape(1, 1)], axis=0)\n self.not_dones_no_max = np.concatenate([self.not_dones_no_max,\n np.asarray(~done_no_max, dtype=np.float32).reshape(1, 1)],\n axis=0)\n\n self.trajectory_lengths[-1] += 1\n if env_reward is not None:\n self.env_rewards = np.concatenate([self.env_rewards,\n np.asarray(env_reward, dtype=np.float32).reshape(1, 1)], axis=0)\n\n if image_observations is not None and self._collect_image_observations:\n self.image_observations = np.concatenate([self.image_observations, np.expand_dims(image_observations, axis=0)], axis=0)\n\n def _start_trajectory(self, observation: np.ndarray,\n action: np.ndarray,\n reward: float,\n done: t.Union[float, bool],\n done_no_max: t.Union[float, bool],\n env_reward: t.Optional[float] = None,\n image_observations: t.Optional[np.ndarray] = None):\n \"\"\"\n Start a new trajectory and track the transition\n\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n 
done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observations: (optional) image-based observation -> should not be given is observations is also an image. This\n should be used when you want to accumulate images separately from policy training.\n \"\"\"\n self.observations = np.expand_dims(observation, axis=0).astype(dtype=np.float32)\n self.actions = np.expand_dims(action, axis=0).astype(dtype=np.float32)\n self.rewards = np.asarray(reward, dtype=np.float32).reshape(1, 1)\n if type(done) is float:\n self.not_dones = np.asarray(not done, dtype=np.float32).reshape(1, 1)\n self.not_dones_no_max = np.asarray(not done_no_max, dtype=np.float32).reshape(1, 1)\n else:\n self.not_dones = np.asarray(~done, dtype=np.float32).reshape(1, 1)\n self.not_dones_no_max = np.asarray(~done_no_max, dtype=np.float32).reshape(1, 1)\n\n self.trajectory_lengths.append(1)\n\n if env_reward is not None:\n self.env_rewards = np.asarray(env_reward, dtype=np.float32).reshape(1, 1)\n\n if image_observations is not None and self._collect_image_observations:\n self.image_observations = np.expand_dims(image_observations, axis=0).astype(dtype=np.float32)\n\n def add(self, observation, action, reward, next_observation, done, done_no_max,\n env_reward: t.Optional[float] = None, image_observation: t.Optional[np.ndarray] = None,\n image_next_observation: t.Optional[np.ndarray] = None):\n \"\"\"\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n next_observation: only used when an episode is completed to ensure the last observation is captured\n done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observation: (optional) image-based observation -> should not be given is observations is also an image. This\n should be used when you want to accumulate images separately from policy training.\n image_next_observation: (optional) the image-based next observation -> should not be given when next_observation is also\n and image. 
This should be used when you want to accumulate the images separately from the\n trained policy.\n \"\"\"\n if self.observations is None:\n self._start_trajectory(observation, action, reward, done, done_no_max, env_reward, image_observation)\n elif done:\n self._add_transition(observation, action, reward, done, done_no_max, env_reward, image_observation)\n # the episode has ended, so we need to track the next observation\n self.observations = np.concatenate([self.observations, np.expand_dims(next_observation, axis=0)], axis=0)\n if image_next_observation is not None:\n self.image_observations = np.concatenate([self.image_observations,\n np.expand_dims(image_next_observation, axis=0)], axis=0)\n # create the trajectory\n self.trajectories.append(Trajectory(self.observations.astype(dtype=np.float32),\n (self.image_observations.astype(dtype=np.float32)\n if self.image_observations is not None\n else None),\n actions=self.actions.astype(dtype=np.float32),\n rewards=self.rewards.astype(dtype=np.float32),\n not_dones=self.not_dones.astype(dtype=np.float32),\n not_dones_no_max=self.not_dones_no_max.astype(dtype=np.float32),\n env_rewards=self.env_rewards.astype(dtype=np.float32)))\n # check if the inclusion of the just completed trajectory puts the buffer at capacity\n # if it does, remove the first trajectory as this is a FIFO buffer\n if np.sum(self.trajectory_lengths) >= self.capacity:\n self.trajectories = self.trajectories[1:]\n self.trajectory_lengths = self.trajectory_lengths[1:]\n self.observations = None\n self.actions = None\n self.rewards = None\n self.not_dones = None\n self.not_dones_no_max = None\n self.env_rewards = None\n self.image_observations = None\n else:\n self._add_transition(observation, action, reward, done, done_no_max, env_reward, image_observation)\n\n self.idx = (self.idx + 1) % self.capacity\n self.full = self.full or self.idx == 0\n\n def relabel_with_predictor(self, predictor, state_action_formatter: PreProcessInference):\n \"\"\"\n Relabel the rewards stored in the replay buffer using the given predictor\n\n Args:\n predictor: network that will consume state-action pairs and assign a reward\n state_action_formatter: formats the states and actions for consumption by the reward model\n \"\"\"\n print(\"Relabelling the replay buffer with the updated reward model.\")\n for trajectory in self.trajectories:\n # the number of batches to run through the model\n total_iter = int(len(trajectory) / self._RELABEL_BATCH_SIZE)\n # handle the case where we have more transitions than is evenly divisible by the batch size\n if len(trajectory) > self._RELABEL_BATCH_SIZE * total_iter:\n total_iter += 1\n # collect and process each batch to be passed through predictor\n for index in range(total_iter):\n start_indx = index * self._RELABEL_BATCH_SIZE\n # make sure we don't have an end index that is after the end of the trajectory\n end_indx = min((index + 1) * self._RELABEL_BATCH_SIZE, len(trajectory))\n\n # pull out the actions from the transitions that will be relabelled\n actions = trajectory.actions[start_indx:end_indx]\n # we need to handle the case where the reward model operates off of images\n if predictor.image_observations:\n observations = trajectory.all_image_observations[start_indx:end_indx]\n else:\n observations = trajectory.all_observations[start_indx:end_indx]\n formatted_state_action = state_action_formatter.format_state_action(observations, actions, batch_sa=True)\n pred_reward = predictor.r_hat_batch(formatted_state_action)\n # update the rewards assigned to the 
transitions\n trajectory.rewards[start_indx:end_indx] = pred_reward\n\n def sample(self, batch_size: int):\n indxs = list(np.random.randint(0, np.sum(self.trajectory_lengths) - 1, size=batch_size))\n observations, actions, rewards, next_observations, not_dones, not_dones_no_max, env_rewards, image_observations, next_image_observations = self[indxs]\n observations = torch.as_tensor(observations, device=self.device).float()\n actions = torch.as_tensor(actions, device=self.device)\n rewards = torch.as_tensor(rewards, device=self.device)\n next_observations = torch.as_tensor(next_observations, device=self.device).float()\n not_dones = torch.as_tensor(not_dones, device=self.device)\n not_dones_no_max = torch.as_tensor(not_dones_no_max, device=self.device)\n env_rewards = torch.as_tensor(env_rewards, device=self.device)\n image_observations = (torch.as_tensor(image_observations, device=self.device).float() if self._collect_image_observations else None)\n next_image_observations = (torch.as_tensor(next_image_observations, device=self.device).float() if self._collect_image_observations else None)\n return observations, actions, rewards, next_observations, not_dones, not_dones_no_max, env_rewards, image_observations, next_image_observations\n\n def sample_state_ent(self, batch_size: int):\n observations, actions, rewards, next_observations, not_dones, not_dones_no_max, _, _, _ = self.sample(batch_size)\n full_observation = torch.as_tensor(np.concatenate([traj.all_observations for traj in self.trajectories], axis=0),\n device=self.device)\n return observations, full_observation, actions, rewards, next_observations, not_dones, not_dones_no_max\n\n def save(self, out_directory: Path, env_id: str, step: int):\n \"\"\"\n Save the replay buffer to disk as a npz archive\n Args:\n out_directory: location where replay buffer will be saved\n env_id: the environment within which the data was generated\n step: the number of policy training steps taken to produce this dataset\n \"\"\"\n # create the ZipFile object\n zip_obj = ZipFile(out_directory / f\"{env_id}_replay_buffer_{step}.zip\", \"w\")\n\n # write each trajectory file to disk and to the zip archive\n for traj_id, trajectory in enumerate(self.trajectories):\n trajectory.save(out_directory / f\"{traj_id}.npz\")\n zip_obj.write(out_directory / f\"{traj_id}.npz\")\n # close the Zip File\n zip_obj.close()\n\n @staticmethod\n def from_directory(directory_path: Path,\n device: torch.device = 'cuda') -> \"TrajectoryReplayBuffer\":\n \"\"\"\n Create a TrajectoryReplay buffer from a directory of npz archive trajectories\n\n Args:\n directory_path: the location of the npz_archive on disk\n device: the device sampled transitions should be pushed to\n Returns:\n populated trajectory replay buffer\n \"\"\"\n # accumulate the trajectories\n trajectories = []\n trajectory_lengths = []\n # determine how many transitions are in the replay buffer\n capacity = 0\n # load each trajectory from disk\n for traj_filename in directory_path.iterdir():\n # we only load data from npz archives, so we need to skip anything else\n if not traj_filename.suffix == \".npz\": continue\n # load the trajectory from disk\n traj = Trajectory.from_npz(traj_filename)\n # track the trajectory\n trajectories.append(traj)\n # track the trajectory's length\n trajectory_lengths.append(len(traj))\n # track the trajectory's length\n capacity += len(traj)\n # create the buffer\n _buffer = TrajectoryReplayBuffer(capacity=capacity, device=device)\n # add the trajectories to the buffer\n 
_buffer.trajectories = trajectories\n _buffer.trajectory_lengths = trajectory_lengths\n\n return _buffer" }, { "identifier": "StateActionRewardModel", "path": "reed/models/reward_model.py", "snippet": "class StateActionRewardModel:\n \"\"\"\n Reward model that operates over state action pairs\n \"\"\"\n def __init__(self,\n in_dim: t.Union[int, t.List[int]],\n ensemble_size: int = 3,\n hidden_dim: int = 256,\n hidden_layers: int = 3,\n final_activation: str = 'tanh',\n lr: float = 3e-4,\n optimizer: str = \"adam\",\n reward_train_batch: int = 128,\n size_segment: int = 1,\n device: torch.device = \"cuda\",\n multi_gpu: bool = False,\n image_observations: bool = False,\n image_encoder_architecture: str = \"pixl2r\",\n image_hidden_num_channels: int = 32,\n grayscale_images: bool = True):\n # the device the model will be put on\n self.device = device\n # whether data parallelism should be used during model training\n self.multi_gpu = multi_gpu\n # reward model configuration\n self.in_dim = in_dim\n self.hidden_dim = hidden_dim\n self.hidden_layers = hidden_layers\n self.ensemble_size = ensemble_size\n self.lr = lr\n self.optimizer_type = optimizer\n self.ensemble = []\n self.paramlst = []\n self.optimizer = None\n self.model = None\n self.final_activation = final_activation\n self.size_segment = size_segment\n\n self.image_observations = image_observations\n self.image_encoder_architecture = image_encoder_architecture\n self.image_hidden_num_channels = image_hidden_num_channels\n self.grayscale_images = grayscale_images\n\n # construct the reward ensemble\n self.construct_ensemble()\n\n # parameters used to train the reward model on the preference labelled trajectories\n self.train_batch_size = reward_train_batch\n self.CEloss = nn.CrossEntropyLoss()\n\n def eval(self):\n \"\"\"Set each reward model in the ensemble to evaluation mode\"\"\"\n self.ensemble = [net.eval() for net in self.ensemble]\n\n def train(self):\n \"\"\"Set each reward model in the ensemble to train mode\"\"\"\n self.ensemble = [net.train() for net in self.ensemble]\n\n def softXEnt_loss(self, predicted: torch.Tensor, target: torch.Tensor):\n logprobs = F.log_softmax(predicted, dim=1)\n return -(target * logprobs).sum() / predicted.shape[0]\n\n def construct_ensemble(self):\n for _ in range(self.ensemble_size):\n if self.image_observations:\n model = ImageStateActionNetwork(self.in_dim,\n out_size=1,\n hidden_dim=self.hidden_dim,\n hidden_depth=self.hidden_layers,\n final_activation=self.final_activation,\n image_encoder_architecture=self.image_encoder_architecture,\n image_hidden_num_channels=self.image_hidden_num_channels).float()\n else:\n model = StateActionNetwork(self.in_dim,\n out_size=1,\n hidden_dim=self.hidden_dim,\n hidden_depth=self.hidden_layers,\n final_activation=self.final_activation).float()\n print(model)\n # check if the model will be run with Data Parallelism\n if self.multi_gpu:\n print(f\"There are {torch.cuda.device_count()} GPU devices, so the reward ensemble WILL be trained \"\n f\"using nn.DataParallel\")\n self.ensemble.append(nn.DataParallel(model).to(self.device))\n else:\n print(f\"There are {torch.cuda.device_count()} GPU devices, so the reward ensemble will NOT be trained \"\n f\"using nn.DataParallel\")\n self.ensemble.append(model.to(self.device))\n # track all model parameters\n self.paramlst.extend(model.parameters())\n # create a single optimizer applied to all ensemble members\n if self.optimizer_type == \"adam\":\n self.optimizer = torch.optim.Adam(self.paramlst, lr=self.lr)\n elif 
self.optimizer_type == \"sgd\":\n self.optimizer = torch.optim.SGD(self.paramlst, lr=self.lr)\n else:\n raise NotImplementedError(f\"{self.optimizer_type} is not implemented as a reward optimizer and must be \"\n f\"one of 'adam' or 'sgd'.\")\n\n def format_state(self, obs: np.ndarray, batch_states: bool = False, by_trajectory: bool = False):\n \"\"\"\n Args:\n obs: the state observations\n batch_states: whether a batch of state is to be processed\n by_trajectory: whether the batch of states is structured by trajectory -> should only be\n True when batch_sa=True\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n # check if the images needs to be converted to grayscale\n if self.grayscale_images:\n obs = _to_grayscale(obs, batch_states=batch_states)\n if batch_states:\n # permute the input so that the channels are in the first dimension\n if by_trajectory:\n obs = np.transpose(obs, (0, 1, 4, 2, 3))\n else:\n print(obs.shape)\n obs = np.transpose(obs, (0, 3, 1, 2))\n return obs\n else:\n # permute the input so that the channels are in the first dimension\n obs = np.transpose(obs, (2, 0, 1))\n # add a dimension along the front for concatenation into the buffer\n return obs.reshape(1, *obs.shape)\n else:\n return obs.reshape(1, obs.shape[1:]) if batch_states else obs.reshape(1, obs.shape[0])\n\n def format_state_action(self, obs: np.ndarray, act: np.ndarray,\n batch_sa: bool = False, by_trajectory: bool = False) -> np.ndarray:\n \"\"\"\n Args:\n obs: the state observations\n act: the actions associated with each state observation\n batch_sa: whether a batch of state-action pairs is to be processed\n by_trajectory: whether the batch of state-action pairs is structured by trajectory -> should only be\n True when batch_sa=True\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n # check if the images needs to be converted to grayscale\n if self.grayscale_images:\n obs = _to_grayscale(obs, batch_states=batch_sa)\n if batch_sa:\n obs_dim = obs.shape[1:]\n # we concatenate the actions along channel dimension of the image\n if by_trajectory:\n repeated_actions = np.tile(act.reshape((act.shape[0], act.shape[1], 1, 1, act.shape[-1])),\n (1, 1, obs_dim[0], obs_dim[1], 1))\n else:\n repeated_actions = np.tile(act.reshape((act.shape[0], 1, 1, act.shape[-1])),\n (1, obs_dim[0], obs_dim[1], 1))\n # now concatenate the two\n sa_t = np.concatenate((obs, repeated_actions), axis=-1)\n # permute the input so that the channels are in the first dimension\n if by_trajectory:\n sa_t = np.transpose(sa_t, (0, 1, 4, 2, 3))\n else:\n sa_t = np.transpose(sa_t, (0, 3, 1, 2))\n return sa_t\n else:\n obs_dim = obs.shape\n # we concatenate the actions along channel dimension of the image\n repeated_actions = np.tile(act.reshape((1, 1, -1)), (obs_dim[0], obs_dim[1], 1))\n # now concatenate the two\n sa_t = np.concatenate((obs, repeated_actions), axis=-1)\n # permute the input so that the channels are in the first dimension\n sa_t = np.transpose(sa_t, (2, 0, 1))\n # add a dimension along the front for concatenation into the buffer\n return sa_t.reshape(1, *self.in_dim)\n else:\n sa_t = np.concatenate([obs, act], axis=-1)\n if batch_sa:\n return sa_t\n else:\n return sa_t.reshape(1, -1)\n\n def p_hat_member(self, x_1: np.ndarray, x_2: np.ndarray, member: int = -1):\n # softmaxing to get the probabilities according to eqn 1\n with torch.no_grad():\n # if we are using image observations, we need to collapse along the batch and time dimensions to 
push\n # a forward pass through the network\n # to compute the probabilities when then need to re-construct the batch and time dimensions\n if self.image_observations:\n # we need to compute the probabilities in batches to avoid out of memory issues\n # we use the train batch size as it should be an amount safe to put on the GPU's memory without causing\n # issues\n mb_size = self.train_batch_size\n start_indx = 0\n r_hat1 = None\n r_hat2 = None\n while start_indx < x_1.shape[0]:\n # check if there is a mb_size worth of trajectories to still be processed\n if start_indx + mb_size <= x_1.shape[0]:\n mb_x_1 = x_1[start_indx:start_indx + mb_size].reshape((-1, *x_1.shape[2:]))\n mb_x_2 = x_1[start_indx:start_indx + mb_size].reshape((-1, *x_1.shape[2:]))\n else:\n # process the leftover trajectories in a batch smaller than mb_size\n mb_x_1 = x_1[start_indx:].reshape((-1, *x_1.shape[2:]))\n mb_x_2 = x_2[start_indx:].reshape((-1, *x_2.shape[2:]))\n # process the leftover trajectories in a batch smaller than mb_size\n mb_rhat1 = self.r_hat_member(torch.from_numpy(mb_x_1).float().to(self.device),\n member=member).detach().cpu().reshape((mb_size, x_1.shape[1], 1))\n mb_rhat2 = self.r_hat_member(torch.from_numpy(mb_x_2).float().to(self.device),\n member=member).detach().cpu().reshape((mb_size, x_2.shape[1], 1))\n start_indx += mb_size\n\n # accumulate the rhats\n if r_hat1 is None:\n r_hat1 = mb_rhat1\n r_hat2 = mb_rhat2\n else:\n r_hat1 = torch.concat((r_hat1, mb_rhat1), dim=0)\n r_hat2 = torch.concat((r_hat2, mb_rhat2))\n\n else:\n r_hat1 = self.r_hat_member(x_1, member=member).cpu()\n r_hat2 = self.r_hat_member(x_2, member=member).cpu()\n r_hat1 = r_hat1.sum(axis=1)\n r_hat2 = r_hat2.sum(axis=1)\n r_hat = torch.cat([r_hat1, r_hat2], axis=-1)\n # taking 0 index for probability x_1 > x_2\n return F.softmax(r_hat, dim=-1)[:, 0]\n\n def p_hat_entropy(self, x_1: np.ndarray, x_2: np.ndarray, member: int = -1):\n # softmaxing to get the probabilities according to eqn 1\n with torch.no_grad():\n r_hat1 = self.r_hat_member(x_1, member=member)\n r_hat2 = self.r_hat_member(x_2, member=member)\n r_hat1 = r_hat1.sum(axis=1)\n r_hat2 = r_hat2.sum(axis=1)\n r_hat = torch.cat([r_hat1, r_hat2], axis=-1)\n\n ent = F.softmax(r_hat, dim=-1) * F.log_softmax(r_hat, dim=-1)\n ent = ent.sum(axis=-1).abs()\n return ent\n\n def r_hat_member(self, x: torch.Tensor, member: int = -1) -> torch.Tensor:\n # the network parameterizes r hat in eqn 1 from the paper\n # return self.ensemble[member](torch.from_numpy(x).float().to(device))\n return self.ensemble[member](x)\n\n def r_hat(self, x: np.ndarray):\n # they say they average the rewards from each member of the ensemble, but I think this only makes sense if the\n # rewards are already normalized and I don't understand how the normalization should be happening right now :(\n r_hats = []\n for member in range(self.ensemble_size):\n r_hats.append(self.r_hat_member(torch.from_numpy(x).float().to(self.device), member=member).detach().cpu().numpy())\n r_hats = np.array(r_hats)\n return np.mean(r_hats)\n\n def r_hat_batch(self, x: np.ndarray):\n # they say they average the rewards from each member of the ensemble, but I think this only makes sense if the rewards are already normalized\n # but I don't understand how the normalization should be happening right now :(\n r_hats = []\n for member in range(self.ensemble_size):\n r_hats.append(self.r_hat_member(torch.from_numpy(x).float().to(self.device), member=member).detach().cpu().numpy())\n r_hats = np.array(r_hats)\n return 
np.mean(r_hats, axis=0)\n\n def save(self, model_dir: str, env_id: str, step: int):\n \"\"\"\n Save the reward ensemble to disk\n\n Args:\n model_dir: path where the ensemble is to be saved\n env_id: the environment on which the ensemble has been trained\n step: the number of policy training steps\n \"\"\"\n for member in range(self.ensemble_size):\n torch.save(\n self.ensemble[member].state_dict(), f'{model_dir}/{env_id}_reward_model_{step}_{member}.pt'\n )\n\n def train_reward(self,\n preference_data_loader: PreferenceTripletEnsembleDataLoader,\n num_epoch: int):\n \"\"\"\n Train the reward model on the given preference dataset.\n\n Args:\n preference_data_loader: loads batches of preference triplets. Separated handles different preference\n dataset permutations for each member of the reward's ensemble.\n num_epoch: the number of training epochs to execute\n \"\"\"\n # track the accuracy and loss by ensemble member per epoch\n ensemble_accuracies = np.zeros((num_epoch, self.ensemble_size))\n ensemble_losses = np.zeros((num_epoch, self.ensemble_size))\n\n # train the reward model for the specified number of epochs\n for epoch in range(num_epoch):\n if epoch % 10 == 0:\n print(f\"Running preference training epoch {epoch} of {num_epoch}\")\n epoch_ensemble_losses = np.zeros(self.ensemble_size)\n epoch_ensemble_acc = np.zeros(self.ensemble_size)\n # train on each batch\n for batch_indx, batch in enumerate(preference_data_loader):\n # confirm there is either a single batch to be shared by all networks in the reward ensemble or\n # a batch per network in the ensemble\n assert len(batch) == 1 or len(batch) == self.ensemble_size\n # we need to zero out the gradients before we begin to process this batch\n self.optimizer.zero_grad()\n # we will need to accumulate the loss across the ensemble members\n batch_loss = 0.0\n for member_indx, preference_triplet_batch in enumerate(batch):\n # the predicted reward per transition in each trajectory\n # check if we need to collapse the batch and time dimensions into one and then reconstruct the two\n if self.image_observations:\n # get the rewards for each transition in the trajectories one\n traj_one_shape = preference_triplet_batch.trajectories_one.shape\n formatted_trajectories_one = preference_triplet_batch.trajectories_one.reshape(\n (-1, *traj_one_shape[2:]))\n r_hat1 = self.r_hat_member(formatted_trajectories_one,\n member=member_indx).reshape((traj_one_shape[0],\n traj_one_shape[1], 1))\n # get the rewards for each transition in the trajectories two\n traj_two_shape = preference_triplet_batch.trajectories_two.shape\n formatted_trajectories_two = preference_triplet_batch.trajectories_two.reshape(\n (-1, *traj_two_shape[2:]))\n r_hat2 = self.r_hat_member(formatted_trajectories_two,\n member=member_indx).reshape((traj_two_shape[0],\n traj_two_shape[1], 1))\n else:\n r_hat1 = self.r_hat_member(preference_triplet_batch.trajectories_one,\n member=member_indx)\n r_hat2 = self.r_hat_member(preference_triplet_batch.trajectories_two,\n member=member_indx)\n # compute the return per trajectory\n r_hat1 = r_hat1.sum(axis=1)\n r_hat2 = r_hat2.sum(axis=1)\n\n r_hat = torch.cat([r_hat1, r_hat2], dim=-1)\n\n # compute the ensemble member's loss\n curr_loss = self.CEloss(r_hat, preference_triplet_batch.preference_labels.squeeze())\n # add the loss from the ensemble member to the batch loss\n batch_loss += curr_loss\n # track the loss for this ensemble member\n epoch_ensemble_losses[member_indx] += curr_loss.item()\n\n # compute the accuracy of the ensemble member's 
predictions\n _, predicted = torch.max(r_hat.data, 1)\n correct = (predicted == preference_triplet_batch.preference_labels.squeeze()).sum().item()\n epoch_ensemble_acc[member_indx] += correct\n # compute the gradients\n batch_loss.backward()\n # apply the gradients to the model\n self.optimizer.step()\n # compute the ensemble accuracy for this epoch\n ensemble_accuracies[epoch] = epoch_ensemble_acc / preference_data_loader.dataset_length()\n # compute the mean ensemble loss for this epoch\n ensemble_losses[epoch] = epoch_ensemble_losses / preference_data_loader.dataset_length()\n\n if epoch % 10 == 0:\n print(f\"Epoch {epoch} mean accuracy = {np.mean(ensemble_accuracies[:epoch + 1]):.2f}\")\n\n # check the current mean accuracy, if it is greater than 0.97 then terminate training\n if np.mean(ensemble_accuracies[epoch]) >= 0.97:\n print(f\"Epoch accuracy {np.mean(ensemble_accuracies[epoch]):.2f} \"\n f\"after {epoch} epochs triggered early stopping.\")\n return ensemble_accuracies[:epoch + 1], ensemble_losses[:epoch + 1]\n\n print(f\"Epoch {num_epoch} mean accuracy = {np.mean(ensemble_accuracies):.2f}\")\n\n return ensemble_accuracies, ensemble_losses" }, { "identifier": "PreferenceDataset", "path": "reed/data/preference_dataset.py", "snippet": "class PreferenceDataset:\n def __init__(self, observation_dim: t.Union[t.Tuple, int], action_dim: t.Union[t.Tuple, int], capacity: int,\n size_segment: int, out_path: Path, image_observations: bool, grayscale_images: bool,\n collect_image_pref_dataset: bool, state_action_formatter: PreProcessInference,\n teacher_beta: float = -1, teacher_gamma: float = 1,\n teacher_eps_mistake: float = 0, teacher_eps_skip: float = 0, teacher_eps_equal: float = 0):\n \"\"\"\n Args:\n observation_dim: the dimensionality of the observations\n action_dim: the dimensionality of the actions\n capacity: the maximum number of trajectory pairs to include in the action_dimtaset\n size_segment: the length of the trajectory segments\n out_path: the location where the preference action_dimtaset will be written to disk during training\n image_observations: whether the observations given to the reward model are images\n grayscale_images: whether the image observations should be converted to grayscale instead of color\n collect_image_pref_dataset: whether to collect the image preference dataset separate from the observations.\n Should NOT be set to true if the observations are images.\n state_action_formatter: function that maps states and actions to a single input\n teacher_beta\n teacher_gamma: used to determine how much influence each reward has on the preference label based on\n order within the trajectory. 
Used to compute the return\n teacher_eps_mistake: the frequency with which the teacher assigns an incorrect label\n teacher_eps_skip: the frequency with which the teacher does not assign a label\n teacher_eps_equal: the maximum difference between trajectory returns for the two trajectories to be labelled\n as equally preferred\n \"\"\"\n self.observation_dim = observation_dim\n self.action_dim = action_dim\n self.capacity = capacity\n self.size_segment = size_segment\n self.out_path = out_path\n self.image_observations = image_observations\n self.grayscale_images = grayscale_images\n # whether to collect the preference dataset as images\n # only needs to be set to True if we are not learning the reward function from images\n # if we are learning the reward function from images then we have an image dataset\n self.collect_image_pref_dataset = collect_image_pref_dataset\n\n # formats the state-action pairs into a single input to the reward model\n self.state_action_formatter = state_action_formatter\n\n # track where each preference triplet is written to disk\n self._preference_triplet_tracker: t.List[Path] = []\n\n self.buffer_index = 0\n self.buffer_full = False\n\n # create the preference labeller\n self._preference_labeller = _PreferenceLabeller(teacher_beta=teacher_beta, teacher_gamma=teacher_gamma,\n teacher_eps_mistake=teacher_eps_mistake,\n teacher_eps_skip=teacher_eps_skip,\n teacher_eps_equal=teacher_eps_equal)\n\n # make sure the outpath where the trajectories will be written exist\n self.out_path.mkdir(parents=True, exist_ok=True)\n\n def __len__(self):\n return len(self._preference_triplet_tracker)\n\n def __getitem__(self, item: int) -> PREFERENCE_TRIPLET:\n \"\"\"\n Load and return the preference triplet at the specified index in the buffer\n\n Args:\n item: index of the triplet in the buffer\n Returns:\n trajectory one\n trajectory two\n preference label\n \"\"\"\n # get the location of the specified preference triplet and load it into memory\n npz_archive = np.load(self._preference_triplet_tracker[item].as_posix())\n\n # grab the trajectories and preference labels\n trajectory_one = npz_archive[\"trajectory_one\"]\n trajectory_two = npz_archive[\"trajectory_two\"]\n preference_label = npz_archive[\"preference_label\"]\n\n return trajectory_one, trajectory_two, preference_label\n\n def get_batch(self, indices: t.List[int]) -> PREFERENCE_TRIPLET_BATCH:\n \"\"\"\n Load and return the batch of preference triplets at the given indices in the buffer\n\n Args:\n indices: the buffer indices of the preference triplets to load into memory\n Returns:\n batch of trajectories one\n batch of trajectories two\n batch of preference labels\n \"\"\"\n # accumulate the trajectory pairs and preference labels\n trajectories_one = []\n trajectories_two = []\n preference_labels = []\n # grab each preference triplet\n for index in indices:\n trajectory_one, trajectory_two, preference_label = self[index]\n trajectories_one.append(np.expand_dims(trajectory_one, axis=0))\n trajectories_two.append(np.expand_dims(trajectory_two, axis=0))\n preference_labels.append(preference_label)\n\n return (np.concatenate(trajectories_one, axis=0), np.concatenate(trajectories_two, axis=0),\n np.concatenate(preference_labels, axis=0))\n\n def _sample_trajectory_segments_uniform(self,\n experience_buffer: TrajectoryReplayBuffer,\n trajectory_count: int,\n mini_batch_size: int) -> t.Tuple[np.ndarray, np.ndarray, t.Optional[np.ndarray]]:\n \"\"\"\n Uniformly sample trajectories and then uniformly sample a segment of the 
trajectory.\n\n Format and track the state-action pairs from each trajectory segment\n Format and track rewards from each trajectory segment\n\n Combine the formatted state-action pairs and the rewards across trajectory segments\n\n Args:\n experience_buffer: the replay buffer from which trajectory pairs will be drawn\n trajectory_count: the number of trajectories to be sampled from\n mini_batch_size: the number of trajectories to sample\n\n Returns:\n the formatted state-action pairs from random trajectory segments from trajectories\n the rewards from each random trajectory segment\n (optionally) the image observations from each random trajectory segment - only returned when the flag to\n collect image observations in the preference dataset is true and image observations are not\n used to train the reward model\n \"\"\"\n # select the trajectories to be included in this batch of trajectory segments\n trajectory_indices = np.random.choice(trajectory_count, size=mini_batch_size, replace=True)\n\n # accumulate the formatted state-action pairs and rewards from each trajectory segment\n state_action_pairs = []\n rewards = []\n # optionally accumulate image observations\n image_observations = ([] if self.collect_image_pref_dataset and not self.image_observations else None)\n # extract each trajectory and randomly sample a segment\n for traj_index in trajectory_indices:\n # grab the trajectory\n trajectory = experience_buffer.trajectories[traj_index]\n # select a random segment from the trajectory\n traj_segment = trajectory.random_segment(length=self.size_segment)\n # track the rewards associated with the random segment\n rewards.append(np.expand_dims(traj_segment.env_rewards, axis=0))\n # format the state and action based on whether image observations are being used\n if self.image_observations:\n formatted_pair = self.state_action_formatter.format_state_action(\n traj_segment.initial_image_observations,\n traj_segment.actions,\n batch_sa=True)\n else:\n formatted_pair = self.state_action_formatter.format_state_action(\n traj_segment.initial_observations,\n traj_segment.actions,\n batch_sa=True)\n if self.collect_image_pref_dataset:\n image_observations.append(np.expand_dims(traj_segment.initial_image_observations, axis=0))\n # add a dimension in the front so we can concatenate later and the track\n state_action_pairs.append(np.expand_dims(formatted_pair, axis=0))\n return (np.concatenate(state_action_pairs, axis=0),\n np.concatenate(rewards, axis=0),\n (np.concatenate(image_observations, axis=0) if image_observations is not None else None))\n\n @staticmethod\n def get_rank_probability(trajectories_one: np.ndarray, trajectories_two: np.ndarray,\n reward_model: torch.nn.Module):\n \"\"\"\n Compute the preference-prediction disagreement between the ensemble members for each trajectory pair\n\n Args:\n trajectories_one: the trajectories one to be evaluated for ensemble disagreement\n trajectories_two: the trajectories two to be evaluated for ensemble disagreement\n reward_model: the ensemble of networks that will be used to compute disagreement\n \"\"\"\n\n # get probability x_1 > x_2\n probs = []\n for member in range(len(reward_model.ensemble)):\n probs.append(reward_model.p_hat_member(trajectories_one,\n trajectories_two,\n member=member).cpu().numpy())\n probs = np.array(probs)\n\n return np.mean(probs, axis=0), np.std(probs, axis=0)\n\n def get_queries(self, experience_buffer: TrajectoryReplayBuffer, mb_size=20):\n len_traj, max_len = experience_buffer.trajectory_lengths[0], 
experience_buffer.trajectory_count\n\n # if len(self.experience_buffer.trajectory_lengths[0][-1]) < len_traj:\n # check that the last trajectory contains at least as many transitions as the target segment length\n # we check the last trajectory, because it may be incomplete\n # this is a carry over from the original code. The authors had an assumption that all \"completed\" trajectories\n # will be at least as long as the target segment length\n if experience_buffer.trajectory_lengths[-1] < self.size_segment:\n max_len = max_len - 1\n\n # grab each trajectory, select a random segment from each, format the state-action pairs, and concatenate\n # along the batch dimension\n state_action_pair_traj_one, r_t_1, images_traj_one = self._sample_trajectory_segments_uniform(\n experience_buffer=experience_buffer,\n trajectory_count=max_len,\n mini_batch_size=mb_size)\n state_action_pair_traj_two, r_t_2, images_traj_two = self._sample_trajectory_segments_uniform(\n experience_buffer=experience_buffer,\n trajectory_count=max_len,\n mini_batch_size=mb_size)\n # confirm the image-specific variables are only populated when they should be\n if not self.collect_image_pref_dataset and self.image_observations:\n assert images_traj_one is None and images_traj_two is None\n return state_action_pair_traj_one, state_action_pair_traj_two, r_t_1, r_t_2, images_traj_one, images_traj_two\n\n def put_queries(self, state_action_pair_traj_one: np.ndarray, state_action_pair_traj_two: np.ndarray,\n preference_labels: np.ndarray,\n images_traj_one: t.Optional[np.ndarray] = None, images_traj_two: t.Optional[np.ndarray] = None):\n \"\"\"\n Args:\n state_action_pair_traj_one: the state-action pairs that make up the trajectories one in the queries\n state_action_pair_traj_two: the state-action pairs that make up the trajectories two in the queries\n preference_labels: the preference labels for each pair of trajectories\n images_traj_one: the images for trajectories one\n images_traj_two: the images for trajectories two\n \"\"\"\n # get the number of triplets to be stored\n total_sample = state_action_pair_traj_one.shape[0]\n # write each preference_triplet to disk\n for batch_indx in range(total_sample):\n # get the index of the triplet in the \"buffer\"\n preference_triplet_index = self.buffer_index + batch_indx\n # check if we need to wrap the buffer\n if preference_triplet_index >= self.capacity:\n preference_triplet_index -= self.capacity\n elif not self.buffer_full:\n # this is a previously unseen preference triplet buffer index, so we need to track the triplet location\n self._preference_triplet_tracker.append(self.out_path / f\"preference_triplet_{preference_triplet_index}.npz\")\n # save the preference triplet\n np.savez((self.out_path / f\"preference_triplet_{preference_triplet_index}.npz\").as_posix(),\n trajectory_one=state_action_pair_traj_one[batch_indx],\n trajectory_two=state_action_pair_traj_two[batch_indx],\n preference_label=preference_labels[batch_indx],\n image_trajectory_one=(\n None if images_traj_one is None else images_traj_one[batch_indx]),\n image_trajectory_two=(\n None if images_traj_two is None else images_traj_two[batch_indx]))\n # set the new buffer index\n next_index = self.buffer_index + total_sample\n # check if the buffer has wrapped\n if next_index >= self.capacity:\n self.buffer_full = True\n # wrap the buffer index\n self.buffer_index = next_index - self.capacity\n else:\n self.buffer_index = next_index\n\n def uniform_sampling(self, experience_buffer: TrajectoryReplayBuffer, mb_size: 
int) -> int:\n \"\"\"\n Grow the preference dataset with preference triplets uniformly sampled from the experience buffer\n\n Args:\n experience_buffer: the replay buffer from which to sample trajectory pairs\n mb_size: target number of preference triplets to add to the preference dataset. Fewer than the target may\n be added depending on the whether labeller skips labelling some trajectories.\n Returns:\n number of preference triplets added to the dataset\n \"\"\"\n # get queries\n sa_t_1, sa_t_2, r_t_1, r_t_2, img_sa_t_1, img_sa_t_2 = self.get_queries(experience_buffer=experience_buffer,\n mb_size=mb_size)\n\n # get labels\n sa_t_1, sa_t_2, r_t_1, r_t_2, labels = self._preference_labeller.get_label(sa_t_1, sa_t_2, r_t_1, r_t_2)\n if len(labels) > 0:\n self.put_queries(sa_t_1, sa_t_2, labels, img_sa_t_1, img_sa_t_2)\n\n return len(labels)\n\n # TODO: refactor to break the circular import that would need to happen in order to specify that reward_model here\n # should be BPref.reward_model.RewardModel\n def disagreement_sampling(self, experience_buffer: TrajectoryReplayBuffer, mb_size: int, large_batch: int,\n reward_model: torch.nn.Module) -> int:\n \"\"\"\n Grow the preference dataset with preference triplets from the experience buffer that the reward ensemble\n disagrees about\n\n Args:\n experience_buffer: the replay buffer from which to sample trajectory pairs\n mb_size: target number of preference triplets to add to the preference dataset. Fewer than the target may\n be added depending on the whether labeller skips labelling some trajectories.\n large_batch: scales up the number of triplets to add to the preference dataset to uniformly select a large\n number of trajectory pairs, which are then pruned based on which ones the reward ensemble\n has the most disagreement over\n reward_model: the ensemble of reward networks that will be used to assess disagreement.\n Should be BPref.reward_model.RewardModel, but cannot import and reference from here right now\n as it would lead to circular imports\n Returns:\n number of preference triplets added to the dataset\n \"\"\"\n # get queries\n sa_t_1, sa_t_2, r_t_1, r_t_2, img_sa_t_1, img_sa_t_2 = self.get_queries(\n experience_buffer=experience_buffer, mb_size=mb_size * large_batch)\n\n # get final queries based on ensemble member disagreement\n _, disagree = self.get_rank_probability(sa_t_1, sa_t_2, reward_model=reward_model)\n top_k_index = (-disagree).argsort()[:mb_size]\n r_t_1, sa_t_1 = r_t_1[top_k_index], sa_t_1[top_k_index]\n r_t_2, sa_t_2 = r_t_2[top_k_index], sa_t_2[top_k_index]\n if img_sa_t_1 is not None:\n img_sa_t_1 = img_sa_t_1[top_k_index]\n img_sa_t_2 = img_sa_t_2[top_k_index]\n\n # get labels\n sa_t_1, sa_t_2, r_t_1, r_t_2, labels = self._preference_labeller.get_label(\n sa_t_1, sa_t_2, r_t_1, r_t_2)\n if len(labels) > 0:\n self.put_queries(sa_t_1, sa_t_2, labels, img_sa_t_1, img_sa_t_2)\n\n return len(labels)\n\n def set_teacher_thres_skip(self, new_margin):\n self._preference_labeller.teacher_thres_skip = new_margin * self._preference_labeller.teacher_eps_skip\n\n def set_teacher_thres_equal(self, new_margin):\n self._preference_labeller.teacher_eps_equal = new_margin * self._preference_labeller.teacher_eps_equal\n\n def save(self, dataset_dir: Path, env_id: str, step: int):\n \"\"\"\n Saves the preference dataset as a zip archive and the labeller configuration as a yaml to the specified location\n\n Args:\n dataset_dir: path where the dataset is to be saved\n env_id: the environment/task within which the data was generated\n 
step: the number of policy training steps taken to produce this dataset\n \"\"\"\n # create the ZipFile object\n zip_obj = ZipFile(dataset_dir / f\"{env_id}_preference_dataset_{step}.zip\", \"w\")\n # the configuration for the online preference dataset\n config = {\"teacher_params\": {\"teacher_beta\": self._preference_labeller.teacher_beta,\n \"teacher_gamma\": self._preference_labeller.teacher_gamma,\n \"teacher_eps_mistake\": self._preference_labeller.teacher_eps_mistake,\n \"teacher_eps_equal\": self._preference_labeller.teacher_eps_equal,\n \"teacher_eps_skip\": self._preference_labeller.teacher_eps_skip,\n \"teacher_thres_skip\": self._preference_labeller.teacher_thres_skip,\n \"teacher_thres_equal\": self._preference_labeller.teacher_thres_equal,\n \"label_margin\": self._preference_labeller.label_margin,\n \"label_target\": self._preference_labeller.label_target}}\n with open((dataset_dir / f\"preference_dataset_config.yaml\").as_posix(), \"w+\") as f:\n yaml.dump(config, f)\n # write the labeller config to the preference dataset's zip archive\n zip_obj.write(dataset_dir / f\"preference_dataset_config.yaml\")\n\n # add each preference triplet to the zip archive\n for pref_triplet_path in self._preference_triplet_tracker:\n zip_obj.write(pref_triplet_path)\n # move the file from it temp location to the artifact directory\n file_dest_path = dataset_dir / pref_triplet_path.name\n shutil.move(pref_triplet_path, file_dest_path)\n # close the Zip File\n zip_obj.close()" }, { "identifier": "PreferenceTripletEnsembleDataLoader", "path": "reed/data/preference_data_loader.py", "snippet": "class PreferenceTripletEnsembleDataLoader:\n \"\"\"\n Handles loading and generating batches of preference triplets.\n\n The special logic needed is to handle different batch orderings for different networks in the reward ensemble\n \"\"\"\n def __init__(self, dataset: PreferenceDataset, ensemble_size: int,\n batch_size: int = 64, num_workers: int = 0, shuffle: bool = True, device: torch.device = \"cuda\"):\n \"\"\"\n Args:\n\n \"\"\"\n # create a data loader per ensemble network\n self.loader_ensemble = [DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers)\n for _ in range(ensemble_size)]\n\n self.device = device\n\n def _format_batch(self, batch: UNFORMATTED_PREFERENCE_TRIPLET_BATCH) -> FORMATTED_PREFERENCE_TRIPLET_BATCH:\n \"\"\"\n Format the preference batch so that the tensors are longs and on the correct device\n \"\"\"\n return [PreferenceTripletBatch(trajectories_one=member[0].float().to(self.device),\n trajectories_two=member[1].float().to(self.device),\n preference_labels=member[2].long().to(self.device))\n for member in batch]\n\n def dataset_length(self) -> int:\n return len(self.loader_ensemble[0].dataset)\n\n def __iter__(self) -> FORMATTED_PREFERENCE_TRIPLET_BATCH:\n \"\"\"\n Iterate through the preference triplet data loaders and return the batch per ensemble member\n\n Returns:\n list of PreferenceTripletBatch\n \"\"\"\n # set up each loader as an iterator\n iter_loader_ensemble = [iter(loader) for loader in self.loader_ensemble]\n # for each data loader grab the next batch until there are no more batches to grab\n while True:\n # check if there is a next batch to return\n try:\n yield self._format_batch([next(dataloader_iterator) for dataloader_iterator in iter_loader_ensemble])\n except StopIteration:\n break" }, { "identifier": "PreProcessInference", "path": "reed/data/preprocess_images.py", "snippet": "class PreProcessInference:\n \"\"\"\n 
Preprocess the data for inference by the reward, SSC, and SFC models\n \"\"\"\n def __init__(self,\n image_observations: bool = False,\n grayscale_images: bool = True,\n normalize_images: bool = True,\n environment_id: str = \"dmc\"):\n \"\"\"\n Args:\n image_observations: whether the observations are images\n grayscale_images: whether images observations should be in grayscale\n normalize_images: whether the image observations should be normalized\n environment_id: the environment from which the data is coming\n \"\"\"\n self.image_observations = image_observations\n self.grayscale_images = grayscale_images\n self.normalize_images = normalize_images\n self.environment_id = environment_id\n\n @staticmethod\n def _channel_first_to_last(observation: np.ndarray,\n batch_states: bool = False,\n by_trajectory: bool = False) -> np.ndarray:\n \"\"\"\n Move the channel from the first dimension to the last dimension\n \"\"\"\n if batch_states and by_trajectory:\n return np.transpose(observation, (0, 1, 3, 4, 2))\n elif batch_states:\n return np.transpose(observation, (0, 2, 3, 1))\n else:\n return np.transpose(observation, (1, 2, 0))\n\n @staticmethod\n def _channel_last_to_first(observation: np.ndarray, batch_states: bool = False,\n by_trajectory: bool = False) -> np.ndarray:\n \"\"\"\n Move the channel from the last dimension to the first dimension\n Args:\n observation: the state observations\n batch_states: whether a batch of state is to be processed\n by_trajectory: whether the batch of states is structured by trajectory -> should only be\n True when batch_sa=True\n Returns:\n the image with the channel dimension moved from first to last\n \"\"\"\n # permute the input so that the channels are in the first dimension of the images\n if batch_states and by_trajectory:\n return np.transpose(observation, (0, 1, 4, 2, 3))\n elif batch_states:\n return np.transpose(observation, (0, 3, 1, 2))\n else:\n # permute the input so that the channels are in the first dimension\n obs = np.transpose(observation, (2, 0, 1))\n # add a dimension along the front for concatenation into the buffer\n return np.expand_dims(obs, axis=0)\n\n def format_state(self, obs: np.ndarray, batch_states: bool = False,\n by_trajectory: bool = False, channel_first: bool = False) -> np.ndarray:\n \"\"\"\n Args:\n obs: the state observations\n batch_states: whether a batch of state is to be processed\n by_trajectory: whether the batch of states is structured by trajectory -> should only be\n True when batch_sa=True\n channel_first: whether the channel dimension is first when the observations are images.\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n if channel_first:\n # move the channel dimension from first to last to avoid a bunch of logic in our formatting methods\n # that handles variable locations for the channel dimension\n obs = self._channel_first_to_last(observation=obs,\n batch_states=batch_states,\n by_trajectory=by_trajectory)\n if self.grayscale_images:\n obs = _to_grayscale(observation=obs)\n if self.normalize_images:\n # TODO: add normalization based on pixel mean and standard deviation instead of scaling 0 to 1\n obs = np.divide(obs, 255.)\n # move the channel dimension from first to last\n return self._channel_last_to_first(observation=obs, batch_states=batch_states, by_trajectory=by_trajectory)\n\n else:\n return obs.reshape(1, obs.shape[1:]) if batch_states else obs.reshape(1, obs.shape[0])\n\n def format_state_action(self, obs: np.ndarray, act: np.ndarray,\n batch_sa: 
bool = False, by_trajectory: bool = False,\n channel_first: bool = False) -> np.ndarray:\n \"\"\"\n Args:\n obs: the state observations\n act: the actions associated with each state observation\n batch_sa: whether a batch of state-action pairs is to be processed\n by_trajectory: whether the batch of state-action pairs is structured by trajectory -> should only be\n True when batch_sa=True\n channel_first: whether the channel dimension is first when the observations are images.\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n if channel_first:\n # move the channel dimension from first to last to avoid a bunch of logic in our formatting methods\n # that handles variable locations for the channel dimension\n obs = self._channel_first_to_last(observation=obs,\n batch_states=batch_sa,\n by_trajectory=by_trajectory)\n if self.grayscale_images:\n obs = _to_grayscale(observation=obs)\n if self.normalize_images:\n # TODO: add normalization based on pixel mean and standard deviation instead of scaling 0 to 1\n obs = np.divide(obs, 255.)\n\n # get the dimensions of the image\n obs_dim = obs.shape[-3:]\n assert len(obs_dim) == 3\n # add the actions to the image channels and permute the input so that the channels are in the first\n # dimension of the images\n if batch_sa and by_trajectory:\n repeated_actions = np.tile(act.reshape((act.shape[0], act.shape[1], 1, 1, act.shape[-1])),\n (1, 1, obs_dim[0], obs_dim[1], 1))\n elif batch_sa:\n repeated_actions = np.tile(act.reshape((act.shape[0], 1, 1, act.shape[-1])),\n (1, obs_dim[0], obs_dim[1], 1))\n else:\n repeated_actions = np.tile(act.reshape((1, 1, -1)), (obs_dim[0], obs_dim[1], 1))\n sa_t = np.concatenate((obs, repeated_actions), axis=-1)\n return self._channel_last_to_first(sa_t, batch_states=batch_sa, by_trajectory=by_trajectory)\n else:\n sa_t = np.concatenate([obs, act], axis=-1)\n if batch_sa:\n return sa_t\n else:\n return sa_t.reshape(1, -1)" } ]
import typing as t
import time
import numpy as np
import torch
import hydra
from pathlib import Path
from omegaconf import dictconfig, OmegaConf
from BPref import utils
from BPref.logger import Logger
from BPref.replay_buffer import TrajectoryReplayBuffer
from collections import deque
from reed.models.reward_model import StateActionRewardModel
from reed.data.preference_dataset import PreferenceDataset
from reed.data.preference_data_loader import PreferenceTripletEnsembleDataLoader
from reed.data.preprocess_images import PreProcessInference
19434
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # class PEBBLE: """ Train a reward model in conjunction with policy training following the PEBBLE algorithm from (Lee et al. 2021) """ def __init__(self, experiment_config: dictconfig.DictConfig): """ Args: experiment_config: contains the configuration for the experiment to be run. Access like a dictionry """ # track the experimental configuration self.experiment_config = experiment_config # create the logger to track policy learning progress self.logger = Logger( self.experiment_config.out_dir, save_tb=self.experiment_config.log_save_tb, log_frequency=self.experiment_config.log_frequency, agent=self.experiment_config.agent.name) # used to track where we are in training # total amount of feedback the reward model has solicited self.total_feedback = 0 # total amount of feedback given to the reward model self.labeled_feedback = 0 # policy train step self.step = 0 # we need to set the random seed for replication purposes utils.set_seed_everywhere(self.experiment_config.seed) # the device on which models will be trained self.device = torch.device(self.experiment_config.device) # flag to make sure we are handling multi-gpu training where we need to self.multi_gpu = torch.cuda.device_count() > 1 print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print(f"There is {torch.cuda.device_count()} GPU, so models will be trained with torch.nn.DataParallel.") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") # make the environment if 'metaworld' in self.experiment_config.env: self.env = utils.make_metaworld_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = True else: self.env = utils.make_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = False print('----------------------') print('----------------------') print('----------------------') print('----------------------') print("observation space ", self.env.observation_space.shape[0]) print("action space ", self.env.action_space.shape[0]) print('----------------------') print('----------------------') print('----------------------') print('----------------------') # we need to set the policy's observation and action space self.experiment_config.agent.params.obs_dim = self.env.observation_space.shape[0] self.experiment_config.agent.params.action_dim = self.env.action_space.shape[0] self.experiment_config.agent.params.action_range = [ float(self.env.action_space.low.min()), float(self.env.action_space.high.max()) ] # create the agent specified in the configuration self.agent = hydra.utils.instantiate(self.experiment_config.agent) # the class that will format the observations and observation action pairs for consumption by the reward model self._reward_input_preprocessor = PreProcessInference( image_observations=self.experiment_config.reward_from_image_observations, grayscale_images=self.experiment_config.grayscale_images, normalize_images=self.experiment_config.normalized_images) # determine the reward's observation space # if the reward is trained on images then the reward's 
observation space differs from the policy's, which is
        # trained on the state space
        self._observation_dimensionality = self._determine_observation_dimensions()
        self._reward_observation_dimensionality = self._determine_reward_observation_dimensions()
        # create the agent's replay buffer setting if image observations will need to be tracked
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # class PEBBLE: """ Train a reward model in conjunction with policy training following the PEBBLE algorithm from (Lee et al. 2021) """ def __init__(self, experiment_config: dictconfig.DictConfig): """ Args: experiment_config: contains the configuration for the experiment to be run. Access like a dictionry """ # track the experimental configuration self.experiment_config = experiment_config # create the logger to track policy learning progress self.logger = Logger( self.experiment_config.out_dir, save_tb=self.experiment_config.log_save_tb, log_frequency=self.experiment_config.log_frequency, agent=self.experiment_config.agent.name) # used to track where we are in training # total amount of feedback the reward model has solicited self.total_feedback = 0 # total amount of feedback given to the reward model self.labeled_feedback = 0 # policy train step self.step = 0 # we need to set the random seed for replication purposes utils.set_seed_everywhere(self.experiment_config.seed) # the device on which models will be trained self.device = torch.device(self.experiment_config.device) # flag to make sure we are handling multi-gpu training where we need to self.multi_gpu = torch.cuda.device_count() > 1 print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print(f"There is {torch.cuda.device_count()} GPU, so models will be trained with torch.nn.DataParallel.") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") # make the environment if 'metaworld' in self.experiment_config.env: self.env = utils.make_metaworld_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = True else: self.env = utils.make_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = False print('----------------------') print('----------------------') print('----------------------') print('----------------------') print("observation space ", self.env.observation_space.shape[0]) print("action space ", self.env.action_space.shape[0]) print('----------------------') print('----------------------') print('----------------------') print('----------------------') # we need to set the policy's observation and action space self.experiment_config.agent.params.obs_dim = self.env.observation_space.shape[0] self.experiment_config.agent.params.action_dim = self.env.action_space.shape[0] self.experiment_config.agent.params.action_range = [ float(self.env.action_space.low.min()), float(self.env.action_space.high.max()) ] # create the agent specified in the configuration self.agent = hydra.utils.instantiate(self.experiment_config.agent) # the class that will format the observations and observation action pairs for consumption by the reward model self._reward_input_preprocessor = PreProcessInference( image_observations=self.experiment_config.reward_from_image_observations, grayscale_images=self.experiment_config.grayscale_images, normalize_images=self.experiment_config.normalized_images) # determine the reward's observation space # if the reward is trained on images then the reward's 
observation space differs from the policy's, which is
        # trained on the state space
        self._observation_dimensionality = self._determine_observation_dimensions()
        self._reward_observation_dimensionality = self._determine_reward_observation_dimensions()
        # create the agent's replay buffer setting if image observations will need to be tracked
self.replay_buffer = TrajectoryReplayBuffer(
2
2023-11-06 23:14:20+00:00
24k
alibaba/animate-anything
train.py
[ { "identifier": "VideoJsonDataset", "path": "utils/dataset.py", "snippet": "class VideoJsonDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n video_dir: str = \"./data\",\n video_json: str = \"\",\n fallback_prompt: str = \"\",\n use_bucketing: bool = False,\n cache_latents = False,\n motion_threshold = 50,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n\n self.fallback_prompt = fallback_prompt\n self.video_dir = video_dir\n self.video_files = json.load(open(video_json))\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.fps = fps\n self.cache_latents = cache_latents\n self.motion_threshold = motion_threshold\n self.transform = T.Compose([\n #T.RandomResizedCrop(size=(height, width), scale=(0.8, 1.0), ratio=(width/height, width/height), antialias=False),\n T.Resize(min(height, width), antialias=False),\n T.CenterCrop([height, width])\n ])\n\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n \n @staticmethod\n def __getname__(): return 'video_json'\n\n def __len__(self):\n return len(self.video_files)\n\n def __getitem__(self, index):\n mask = None\n try:\n item = self.video_files[index]\n video_path = os.path.join(self.video_dir, item['video'])\n cache_path = os.path.splitext(video_path)[0] + '.pt'\n if self.cache_latents and os.path.exists(cache_path):\n return torch.load(cache_path, map_location='cpu')\n\n prompt = item['caption']\n if self.fallback_prompt == \"<no_text>\":\n prompt = \"\"\n vr = decord.VideoReader(video_path)\n video = get_frame_batch(self.n_sample_frames, self.fps, vr, self.transform)\n except Exception as err:\n print(\"read video error\", err, video_path)\n return self.__getitem__(index+1)\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n example = {\n \"pixel_values\": normalize_input(video), \n \"prompt_ids\": prompt_ids, \n \"text_prompt\": prompt, \n 'cache_path': cache_path,\n 'dataset': self.__getname__()\n }\n mask = get_moved_area_mask(video.permute([0,2,3,1]).numpy())\n example['motion'] = calculate_motion_score(video.permute([0,2,3,1]).numpy())\n if example['motion'] < self.motion_threshold:\n return self.__getitem__(random.randint(0, len(self)-1))\n return example" }, { "identifier": "SingleVideoDataset", "path": "utils/dataset.py", "snippet": "class SingleVideoDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n frame_step: int = 1,\n single_video_path: str = \"\",\n single_video_prompt: str = \"\",\n use_caption: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n self.frames = []\n self.index = 1\n\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.n_sample_frames = n_sample_frames\n self.frame_step = frame_step\n\n self.single_video_path = single_video_path\n self.single_video_prompt = single_video_prompt\n\n self.width = width\n self.height = height\n def create_video_chunks(self):\n # Create a list of frames separated by sample frames\n # [(1,2,3), (4,5,6), ...]\n vr = decord.VideoReader(self.single_video_path)\n vr_range = range(1, len(vr), self.frame_step)\n\n self.frames = list(self.chunk(vr_range, 
self.n_sample_frames))\n\n # Delete any list that contains an out of range index.\n for i, inner_frame_nums in enumerate(self.frames):\n for frame_num in inner_frame_nums:\n if frame_num > len(vr):\n print(f\"Removing out of range index list at position: {i}...\")\n del self.frames[i]\n\n return self.frames\n\n def chunk(self, it, size):\n it = iter(it)\n return iter(lambda: tuple(islice(it, size)), ())\n\n def get_frame_batch(self, vr, resize=None):\n index = self.index\n frames = vr.get_batch(self.frames[self.index])\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def single_video_batch(self, index):\n train_data = self.single_video_path\n self.index = index\n\n if train_data.endswith(self.vid_types):\n video, _ = self.process_video_wrapper(train_data)\n\n prompt = self.single_video_prompt\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n else:\n raise ValueError(f\"Single video is not a video type. Types: {self.vid_types}\")\n \n @staticmethod\n def __getname__(): return 'single_video'\n\n def __len__(self):\n \n return len(self.create_video_chunks())\n\n def __getitem__(self, index):\n\n video, prompt, prompt_ids = self.single_video_batch(index)\n\n example = {\n \"pixel_values\": normalize_input(video),\n \"prompt_ids\": prompt_ids,\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "ImageDataset", "path": "utils/dataset.py", "snippet": "class ImageDataset(Dataset):\n \n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n base_width: int = 256,\n base_height: int = 256,\n use_caption: bool = False,\n image_dir: str = '',\n single_img_prompt: str = '',\n use_bucketing: bool = False,\n fallback_prompt: str = '',\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.img_types = (\".png\", \".jpg\", \".jpeg\", '.bmp')\n self.use_bucketing = use_bucketing\n #self.image_dir = self.get_images_list(image_dir)\n self.image_dir_path = image_dir\n self.image_dir = json.load(open(kwargs['image_json']))\n self.fallback_prompt = fallback_prompt\n\n self.use_caption = use_caption\n self.single_img_prompt = single_img_prompt\n\n self.width = width\n self.height = height\n\n def get_images_list(self, image_dir):\n if os.path.exists(image_dir):\n imgs = [x for x in os.listdir(image_dir) if x.endswith(self.img_types)]\n full_img_dir = []\n\n for img in imgs: \n full_img_dir.append(f\"{image_dir}/{img}\")\n\n return sorted(full_img_dir)\n\n return ['']\n\n def image_batch(self, index):\n train_data = self.image_dir[index]\n img, prompt = train_data['image'], train_data['caption']\n img = os.path.join(self.image_dir_path, img)\n try:\n img = torchvision.io.read_image(img, mode=torchvision.io.ImageReadMode.RGB)\n except:\n img = T.transforms.PILToTensor()(Image.open(img).convert(\"RGB\"))\n\n width = self.width\n height = self.height\n\n if self.use_bucketing:\n _, h, w = img.shape\n width, height = sensible_buckets(width, height, w, h)\n \n resize = T.transforms.Resize((height, width), 
antialias=True)\n\n img = resize(img) \n img = repeat(img, 'c h w -> f c h w', f=1)\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return img, prompt, prompt_ids\n\n @staticmethod\n def __getname__(): return 'image'\n \n def __len__(self):\n # Image directory\n return len(self.image_dir)\n\n def __getitem__(self, index):\n img, prompt, prompt_ids = self.image_batch(index)\n example = {\n \"pixel_values\": normalize_input(img),\n \"frames\": img,\n \"prompt_ids\": prompt_ids,\n \"text_prompt\": prompt, \n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "VideoFolderDataset", "path": "utils/dataset.py", "snippet": "class VideoFolderDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n path: str = \"./data\",\n fallback_prompt: str = \"\",\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n\n self.fallback_prompt = fallback_prompt\n\n self.video_files = glob(f\"{path}/*.mp4\")\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.fps = fps\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n n_sample_frames = self.n_sample_frames\n native_fps = vr.get_avg_fps()\n \n every_nth_frame = max(1, round(native_fps / self.fps))\n every_nth_frame = min(len(vr), every_nth_frame)\n \n effective_length = len(vr) // every_nth_frame\n if effective_length < n_sample_frames:\n n_sample_frames = effective_length\n raise RuntimeError(\"not enough frames\")\n\n effective_idx = random.randint(0, (effective_length - n_sample_frames))\n idxs = every_nth_frame * np.arange(effective_idx, effective_idx + n_sample_frames)\n\n video = vr.get_batch(idxs)\n video = rearrange(video, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video, vr\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n return video, vr\n \n @staticmethod\n def __getname__(): return 'folder'\n\n def __len__(self):\n return len(self.video_files)\n\n def __getitem__(self, index):\n try:\n video, _ = self.process_video_wrapper(self.video_files[index])\n except Exception as err:\n print(\"read video error\", self.video_files[index])\n video, _ = self.process_video_wrapper(self.video_files[index+1])\n\n if os.path.exists(self.video_files[index].replace(\".mp4\", \".txt\")):\n with open(self.video_files[index].replace(\".mp4\", \".txt\"), \"r\") as f:\n lines = f.readlines()\n prompt = random.choice(lines)\n else:\n prompt = self.fallback_prompt\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return {\"pixel_values\": normalize_input(video[0]), \"frames\": video[0],\n \"prompt_ids\": prompt_ids, \"text_prompt\": prompt, 'dataset': self.__getname__()}" }, { "identifier": "CachedDataset", "path": "utils/dataset.py", "snippet": "class CachedDataset(Dataset):\n def __init__(self,cache_dir: str = ''):\n self.cache_dir = cache_dir\n self.cached_data_list = self.get_files_list()\n\n def get_files_list(self):\n tensors_list = [f\"{self.cache_dir}/{x}\" for x in os.listdir(self.cache_dir) if x.endswith('.pt')]\n return sorted(tensors_list)\n\n 
def __len__(self):\n return len(self.cached_data_list)\n\n def __getitem__(self, index):\n cached_latent = torch.load(self.cached_data_list[index], map_location='cuda:0')\n return cached_latent" }, { "identifier": "VideoBLIPDataset", "path": "utils/dataset.py", "snippet": "class VideoBLIPDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n sample_start_idx: int = 1,\n fps: int = 1,\n json_path: str =\"\",\n json_data = None,\n vid_data_key: str = \"video_path\",\n preprocessed: bool = False,\n use_bucketing: bool = False,\n cache_latents: bool = False,\n motion_threshold = 50,\n **kwargs\n ):\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.use_bucketing = use_bucketing\n self.tokenizer = tokenizer\n self.preprocessed = preprocessed\n \n self.vid_data_key = vid_data_key\n self.train_data = self.load_from_json(json_path, json_data)\n self.cache_latents = cache_latents\n self.motion_threshold = motion_threshold\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.sample_start_idx = sample_start_idx\n self.fps = fps\n self.transform = T.Compose([\n #T.RandomResizedCrop(size=(height, width), scale=(0.8, 1.0), ratio=(width/height, width/height), antialias=False)\n T.Resize(min(height, width), antialias=False),\n T.CenterCrop([height, width])\n ])\n\n def build_json(self, json_data):\n extended_data = []\n for data in json_data['data']:\n for nested_data in data['data']:\n self.build_json_dict(\n data, \n nested_data, \n extended_data\n )\n json_data = extended_data\n return json_data\n\n def build_json_dict(self, data, nested_data, extended_data):\n clip_path = nested_data['clip_path'] if 'clip_path' in nested_data else None\n \n extended_data.append({\n self.vid_data_key: data[self.vid_data_key],\n 'frame_index': nested_data['frame_index'],\n 'prompt': nested_data['prompt'],\n 'clip_path': clip_path\n })\n \n def load_from_json(self, path, json_data):\n try:\n with open(path) as jpath:\n print(f\"Loading JSON from {path}\")\n json_data = json.load(jpath)\n\n return self.build_json(json_data)\n\n except:\n import traceback\n traceback.print_exc()\n self.train_data = []\n print(\"Non-existant JSON path. 
Skipping.\")\n \n def validate_json(self, base_path, path):\n return os.path.exists(f\"{base_path}/{path}\")\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def train_data_batch(self, index):\n vid_data = self.train_data[index]\n # Get video prompt\n prompt = vid_data['prompt']\n # If we are training on individual clips.\n if 'clip_path' in self.train_data[index] and \\\n self.train_data[index]['clip_path'] is not None:\n clip_path = vid_data['clip_path']\n else:\n clip_path = vid_data[self.vid_data_key]\n # Get the frame of the current index.\n self.sample_start_idx = vid_data['frame_index']\n cache_path = os.path.splitext(clip_path)[0] + '.pt'\n if self.cache_latents and os.path.exists(cache_path):\n return torch.load(cache_path, map_location='cpu')\n\n vr = decord.VideoReader(clip_path)\n video = get_frame_batch(self.n_sample_frames, self.fps, vr, self.transform)\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n example = {\n \"pixel_values\": normalize_input(video),\n \"prompt_ids\": prompt_ids,\n \"text_prompt\": prompt,\n 'dataset': self.__getname__(),\n 'cache_path': cache_path,\n }\n mask = get_moved_area_mask(video.permute([0,2,3,1]).numpy())\n example['mask'] = mask\n example['motion'] = calculate_motion_score(video.permute([0,2,3,1]).numpy())\n return example\n \n\n @staticmethod\n def __getname__(): return 'video_blip'\n\n def __len__(self):\n if self.train_data is not None:\n return len(self.train_data)\n else: \n return 0\n\n def __getitem__(self, index):\n example = self.train_data_batch(index)\n if example['motion'] < self.motion_threshold:\n return self.__getitem__(random.randint(0, len(self)-1))\n return example" }, { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition_mask.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample shaped output.\n\n This model inherits from [`ModelMixin`]. 
Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, it will skip the normalization and activation layers in post-processing\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n up_block_types: Tuple[str] = (\"UpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\"),\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1024,\n attention_head_dim: Union[int, Tuple[int]] = 64,\n motion_mask = False,\n motion_strength = False,\n ):\n super().__init__()\n self.motion_mask = motion_mask\n self.motion_strength = motion_strength\n print(f\"motion mask {self.motion_mask}, motion_strength {self.motion_strength}\")\n self.sample_size = sample_size\n self.gradient_checkpointing = False\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. 
`down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_out_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n self.conv_in2 = nn.Conv2d(\n 5, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n self.time_proj = Timesteps(block_out_channels[0], True, 0)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n cond_proj_dim=block_out_channels[0],\n )\n\n self.motion_proj = Timesteps(block_out_channels[0], True, 0)\n self.motion_embedding = nn.Sequential(\n nn.Linear(timestep_input_dim, time_embed_dim), nn.SiLU(),\n nn.Linear(time_embed_dim, time_embed_dim))\n nn.init.zeros_(self.motion_embedding[-1].weight)\n nn.init.zeros_(self.motion_embedding[-1].bias)\n\n self.transformer_in = TransformerTemporalModel(\n num_attention_heads=8,\n attention_head_dim=attention_head_dim,\n in_channels=block_out_channels[0],\n num_layers=1,\n )\n\n # class embedding\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=False,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=False,\n )\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n 
num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=False,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, value=False):\n self.gradient_checkpointing = value\n self.mid_block.gradient_checkpointing = value\n for module in self.down_blocks + self.up_blocks:\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value \n \n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n condition_latent: torch.Tensor,\n mask: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n motion = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Returns:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n sample = torch.cat([condition_latent, sample], dim=2)\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n num_frames = sample.shape[2]\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n if self.motion_strength and motion is not None:\n timestep_cond = self.motion_proj(motion).to(dtype=self.dtype)\n emb = self.time_embedding(t_emb, timestep_cond)\n #emb += self.motion_embedding(m_emb)\n else:\n emb = self.time_embedding(t_emb, timestep_cond)\n emb = emb.repeat_interleave(repeats=num_frames, dim=0)\n encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0)\n\n # 2. pre-process\n if self.motion_mask and mask is not None:\n mask = repeat(mask , 'b 1 1 h w -> (t b) 1 f h w', t=sample.shape[0]//mask.shape[0], f=sample.shape[2])\n sample = torch.cat([mask, sample], dim=1)\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in2(sample)\n else:\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in(sample)\n\n if num_frames > 1:\n if self.gradient_checkpointing:\n sample = transformer_g_c(self.transformer_in, sample, num_frames)\n else:\n sample = self.transformer_in(sample, num_frames=num_frames).sample\n\n # 3. 
down\n down_block_res_samples = (sample,)\n for i, downsample_block in enumerate(self.down_blocks):\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)\n \n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n num_frames=num_frames,\n )\n\n # 6. 
post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n\n sample = self.conv_out(sample)\n\n # reshape to (batch, channel, framerate, width, height)\n sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)\n sample = sample[:,:,1:]\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)" }, { "identifier": "LatentToVideoPipeline", "path": "models/pipeline.py", "snippet": "class LatentToVideoPipeline(TextToVideoSDPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt = None,\n height= None,\n width= None,\n num_frames: int = 16,\n num_inference_steps: int = 50,\n guidance_scale= 9.0,\n negative_prompt= None,\n eta: float = 0.0,\n generator= None,\n latents= None,\n prompt_embeds= None,\n negative_prompt_embeds= None,\n output_type= \"np\",\n return_dict: bool = True,\n callback= None,\n callback_steps: int = 1,\n cross_attention_kwargs= None,\n condition_latent=None,\n mask=None,\n timesteps=None,\n motion=None,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`.\n instead.\n height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The height in pixels of the generated video.\n width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The width in pixels of the generated video.\n num_frames (`int`, *optional*, defaults to 16):\n The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds\n amounts to 2 seconds of video.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality videos at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate videos that are closely linked to the text `prompt`,\n usually at the expense of lower video quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the video generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is\n less than `1`).\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`. 
Latents should be of shape\n `(batch_size, num_channel, num_frames, height, width)`.\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n output_type (`str`, *optional*, defaults to `\"np\"`):\n The output format of the generate video. Choose between `torch.FloatTensor` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.TextToVideoSDPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Examples:\n\n Returns:\n [`~pipelines.stable_diffusion.TextToVideoSDPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.TextToVideoSDPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated frames.\n \"\"\"\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n num_images_per_prompt = 1\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(\n prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n #device = self._execution_device\n device = latents.device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n prompt_embeds = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n if timesteps is None:\n timesteps = self.scheduler.timesteps\n else:\n num_inference_steps = len(timesteps)\n # 5. Prepare latent variables. 
do nothing\n\n # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 7. Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n uncondition_latent = condition_latent\n condition_latent = torch.cat([uncondition_latent, condition_latent]) if do_classifier_free_guidance else condition_latent \n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n if motion is not None:\n motion = torch.tensor(motion, device=device)\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n condition_latent=condition_latent,\n mask=mask,\n motion=motion\n ).sample\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # reshape latents\n bsz, channel, frames, width, height = latents.shape\n latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)\n noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # reshape latents back\n latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4)\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n video_tensor = self.decode_latents(latents)\n\n if output_type == \"pt\":\n video = video_tensor\n else:\n video = tensor2vid(video_tensor)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (video, latents)\n\n return TextToVideoSDPipelineOutput(frames=video)" }, { "identifier": "LoraHandler", "path": "utils/lora_handler.py", "snippet": "class LoraHandler(object):\n def __init__(\n self, \n version: LORA_VERSIONS = LoraVersions.cloneofsimo, \n use_unet_lora: bool = False,\n use_text_lora: bool = False,\n save_for_webui: bool = False,\n only_for_webui: bool = False,\n lora_bias: str = 'none',\n unet_replace_modules: list = ['UNet3DConditionModel'],\n text_encoder_replace_modules: list = ['CLIPEncoderLayer']\n ):\n self.version = version\n self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)\n self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)\n self.lora_bias = lora_bias\n self.use_unet_lora = use_unet_lora\n self.use_text_lora = use_text_lora\n self.save_for_webui = save_for_webui\n self.only_for_webui = only_for_webui\n self.unet_replace_modules = unet_replace_modules\n self.text_encoder_replace_modules = text_encoder_replace_modules\n self.use_lora = any([use_text_lora, use_unet_lora])\n\n if self.use_lora:\n print(f\"Using LoRA Version: {self.version}\")\n\n def is_cloneofsimo_lora(self):\n return 
self.version == LoraVersions.cloneofsimo\n\n def is_stable_lora(self):\n return self.version == LoraVersions.stable_lora\n\n def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader):\n\n if self.is_cloneofsimo_lora():\n\n if func_type == LoraFuncTypes.loader:\n return monkeypatch_or_replace_lora_extended\n\n if func_type == LoraFuncTypes.injector:\n return inject_trainable_lora_extended\n\n if self.is_stable_lora():\n\n if func_type == LoraFuncTypes.loader:\n return load_lora\n\n if func_type == LoraFuncTypes.injector:\n return add_lora_to\n \n assert \"LoRA Version does not exist.\"\n\n def check_lora_ext(self, lora_file: str):\n return lora_file.endswith(tuple(LORA_FILE_TYPES))\n\n def get_lora_file_path(\n self, \n lora_path: str, \n model: Union[UNet3DConditionModel, CLIPTextModel]\n ):\n if os.path.exists(lora_path):\n lora_filenames = [fns for fns in os.listdir(lora_path)]\n is_lora = self.check_lora_ext(lora_path)\n\n is_unet = isinstance(model, UNet3DConditionModel)\n is_text = isinstance(model, CLIPTextModel)\n idx = 0 if is_unet else 1\n\n base_name = FILE_BASENAMES[idx]\n \n for lora_filename in lora_filenames:\n is_lora = self.check_lora_ext(lora_filename)\n if not is_lora:\n continue\n \n if base_name in lora_filename:\n return os.path.join(lora_path, lora_filename)\n\n return None\n\n def handle_lora_load(self, file_name:str, lora_loader_args: dict = None):\n self.lora_loader(**lora_loader_args)\n print(f\"Successfully loaded LoRA from: {file_name}\")\n \n def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,):\n try:\n lora_file = self.get_lora_file_path(lora_path, model)\n\n if lora_file is not None:\n lora_loader_args.update({\"lora_path\": lora_file})\n self.handle_lora_load(lora_file, lora_loader_args)\n\n else:\n print(f\"Could not load LoRAs for {model.__class__.__name__}. 
Injecting new ones instead...\")\n\n except Exception as e:\n print(f\"An error occured while loading a LoRA file: {e}\")\n \n def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias):\n return_dict = lora_args.copy()\n \n if self.is_cloneofsimo_lora():\n return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)\n return_dict.update({\n \"model\": model,\n \"loras\": self.get_lora_file_path(lora_path, model),\n \"target_replace_module\": replace_modules,\n \"r\": r\n })\n\n if self.is_stable_lora():\n KEYS = ['model', 'lora_path']\n return_dict = filter_dict(return_dict, KEYS)\n \n return_dict.update({'model': model, 'lora_path': lora_path})\n\n return return_dict\n\n def do_lora_injection(\n self, \n model, \n replace_modules, \n bias='none',\n dropout=0,\n r=4,\n lora_loader_args=None,\n ): \n REPLACE_MODULES = replace_modules\n\n params = None\n negation = None\n is_injection_hybrid = False\n \n if self.is_cloneofsimo_lora():\n is_injection_hybrid = True\n injector_args = lora_loader_args\n\n params, negation = self.lora_injector(**injector_args) \n for _up, _down in extract_lora_ups_down(\n model, \n target_replace_module=REPLACE_MODULES):\n\n if all(x is not None for x in [_up, _down]):\n print(f\"Lora successfully injected into {model.__class__.__name__}.\")\n\n break\n\n return params, negation, is_injection_hybrid\n\n if self.is_stable_lora():\n injector_args = lora_args.copy()\n injector_args = filter_dict(injector_args, keys=STABLE_LORA_KEYS)\n\n SEARCH_CLASS = [torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.Embedding]\n\n injector_args.update({\n \"model\": model,\n \"target_module\": REPLACE_MODULES,\n \"search_class\": SEARCH_CLASS,\n \"r\": r,\n \"dropout\": dropout,\n \"lora_bias\": self.lora_bias\n })\n\n activator = self.lora_injector(**injector_args)\n activator()\n\n return params, negation, is_injection_hybrid\n\n def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16):\n\n params = None\n negation = None\n\n lora_loader_args = self.get_lora_func_args(\n lora_path,\n use_lora,\n model,\n replace_modules,\n r,\n dropout,\n self.lora_bias\n )\n if use_lora:\n params, negation, is_injection_hybrid = self.do_lora_injection(\n model, \n replace_modules, \n bias=self.lora_bias,\n lora_loader_args=lora_loader_args,\n dropout=dropout,\n r=r\n )\n\n if not is_injection_hybrid:\n self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args)\n \n params = model if params is None else params\n return params, negation\n \n\n def deactivate_lora_train(self, models, deactivate=True):\n \"\"\"\n Usage: Use before and after sampling previews.\n Currently only available for Stable LoRA.\n \"\"\"\n if self.is_stable_lora():\n set_mode_group(models, not deactivate)\n\n def save_cloneofsimo_lora(self, model, save_path, step):\n \n def save_lora(model, name, condition, replace_modules, step, save_path): \n if condition and replace_modules is not None:\n save_path = f\"{save_path}/{step}_{name}.pt\"\n save_lora_weight(model, save_path, replace_modules)\n\n save_lora(\n model.unet, \n FILE_BASENAMES[0], \n self.use_unet_lora, \n self.unet_replace_modules, \n step,\n save_path, \n )\n save_lora(\n model.text_encoder, \n FILE_BASENAMES[1], \n self.use_text_lora, \n self.text_encoder_replace_modules, \n step, \n save_path\n )\n\n train_patch_pipe(model, self.use_unet_lora, self.use_text_lora)\n\n def save_stable_lora(\n self, \n model, \n step, \n name, \n save_path = '', \n 
save_for_webui=False,\n only_for_webui=False\n ):\n import uuid\n\n save_filename = f\"{step}_{name}\"\n lora_metadata = metadata = {\n \"stable_lora_text_to_video\": \"v1\", \n \"lora_name\": name + \"_\" + uuid.uuid4().hex.lower()[:5]\n }\n save_lora(\n unet=model.unet,\n text_encoder=model.text_encoder,\n save_text_weights=self.use_text_lora,\n output_dir=save_path,\n lora_filename=save_filename,\n lora_bias=self.lora_bias,\n save_for_webui=self.save_for_webui,\n only_webui=self.only_for_webui,\n metadata=lora_metadata,\n unet_dict_converter=convert_unet_state_dict,\n text_dict_converter=convert_text_enc_state_dict_v20\n )\n\n def save_lora_weights(self, model: None, save_path: str ='',step: str = ''):\n save_path = f\"{save_path}/lora\"\n os.makedirs(save_path, exist_ok=True)\n\n if self.is_cloneofsimo_lora():\n if any([self.save_for_webui, self.only_for_webui]):\n warnings.warn(\n \"\"\"\n You have 'save_for_webui' enabled, but are using cloneofsimo's LoRA implemention.\n Only 'stable_lora' is supported for saving to a compatible webui file.\n \"\"\"\n )\n self.save_cloneofsimo_lora(model, save_path, step)\n\n if self.is_stable_lora():\n name = 'lora_text_to_video'\n self.save_stable_lora(model, step, name, save_path)" }, { "identifier": "LORA_VERSIONS", "path": "utils/lora_handler.py", "snippet": "LORA_VERSIONS = [LoraVersions.stable_lora, LoraVersions.cloneofsimo]" }, { "identifier": "read_mask", "path": "utils/common.py", "snippet": "def read_mask(json_path, label=[\"mask\"]):\n j = json.load(open(json_path)) \n if type(label) != list:\n labels = [label]\n height = j['imageHeight']\n width = j['imageWidth']\n mask = np.zeros([height, width], dtype=np.uint8)\n for shape in j['shapes']:\n if shape['label'] in label:\n x1, y1 = shape['points'][0]\n x2, y2 = shape['points'][1]\n mask[int(y1):int(y2), int(x1):int(x2)] = 255\n return mask" }, { "identifier": "generate_random_mask", "path": "utils/common.py", "snippet": "def generate_random_mask(image):\n # Create a blank mask with the same size as the image\n b, c , h, w = image.shape\n mask = np.zeros([b, h, w], dtype=np.uint8)\n \n # Generate random coordinates for the mask\n num_points = np.random.randint(3, 10) # Randomly choose the number of points to generate\n points = np.random.randint(0, min(h, w), size=(num_points, 2)) # Randomly generate the points\n # Draw a filled polygon on the mask using the random points\n for i in range(b):\n width = random.randint(w//4, w)\n height = random.randint(h//4, h)\n x = random.randint(0, w-width)\n y = random.randint(0, h-height)\n points=np.array([[x, y], [x+width, y], [x+width, y+height], [x, y+height]])\n mask[i] = cv2.fillPoly(mask[i], [points], 255)\n \n # Apply the mask to the image\n #masked_image = cv2.bitwise_and(image, image, mask=mask)\n return mask " }, { "identifier": "slerp", "path": "utils/common.py", "snippet": "def slerp(z1, z2, alpha):\n theta = torch.acos(torch.sum(z1 * z2) / (torch.norm(z1) * torch.norm(z2)))\n return (\n torch.sin((1 - alpha) * theta) / torch.sin(theta) * z1\n + torch.sin(alpha * theta) / torch.sin(theta) * z2\n )" }, { "identifier": "calculate_motion_score", "path": "utils/common.py", "snippet": "def calculate_motion_score(frame_imgs, calculate_edges=False, color=\"RGB\") -> float:\n # Convert image into HSV colorspace.\n _last_frame = None\n\n _weights = [1.0, 1.0, 1.0, 0.0]\n score = 0\n for frame_img in frame_imgs:\n if color == \"RGB\":\n hue, sat, lum = cv2.split(cv2.cvtColor(frame_img, cv2.COLOR_RGB2HSV))\n else:\n hue, sat, lum = 
cv2.split(cv2.cvtColor(frame_img, cv2.COLOR_BGR2HSV))\n # Performance: Only calculate edges if we have to.\n edges = _detect_edges(lum) if calculate_edges else None\n if _last_frame == None:\n _last_frame = (hue, sat, lum, edges)\n continue\n\n score_components = [\n _mean_pixel_distance(hue, _last_frame[0]),\n _mean_pixel_distance(sat, _last_frame[1]),\n _mean_pixel_distance(lum, _last_frame[2]),\n 0.0 if edges is None else _mean_pixel_distance(edges, _last_frame[3]),\n ]\n\n frame_score: float = (\n sum(component * weight for (component, weight) in zip(score_components, _weights))\n / sum(abs(weight) for weight in _weights))\n score += frame_score\n _last_frame = (hue, sat, lum, edges)\n\n return round(score/(len(frame_imgs)-1) * 10)" }, { "identifier": "read_video", "path": "utils/common.py", "snippet": "def read_video(video_path, frame_number=-1):\n # Open the video file\n cap = cv2.VideoCapture(video_path)\n count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) \n if frame_number == -1:\n frame_number = count\n else:\n frame_number = min(frame_number, count)\n frames = []\n for i in range(frame_number):\n ret, ref_frame = cap.read()\n ref_frame = cv2.cvtColor(ref_frame, cv2.COLOR_BGR2RGB)\n if not ret:\n raise ValueError(\"Failed to read video file\")\n frames.append(ref_frame)\n return frames" }, { "identifier": "calculate_motion_precision", "path": "utils/common.py", "snippet": "def calculate_motion_precision(frames, mask):\n moved_mask = get_moved_area_mask(frames, move_th=20, th=0)\n moved = moved_mask == 255\n gt = mask == 255\n precision = np.sum(moved & gt) / np.sum(moved)\n return precision" }, { "identifier": "calculate_latent_motion_score", "path": "utils/common.py", "snippet": "def calculate_latent_motion_score(latents):\n #latents b, c f, h, w\n diff=torch.abs(latents[:,:,1:]-latents[:,:,:-1])\n motion_score = torch.sum(torch.mean(diff, dim=[2,3,4]), dim=1) * 10\n return motion_score" }, { "identifier": "DDPM_forward", "path": "utils/common.py", "snippet": "def DDPM_forward(x0, step, num_frames, scheduler):\n device = x0.device\n t = scheduler.timesteps[-1]\n xt = repeat(x0, 'b c 1 h w -> b c f h w', f = num_frames)\n\n eps = torch.randn_like(xt)\n alpha_vec = torch.prod(scheduler.alphas[t:])\n xt = torch.sqrt(alpha_vec) * xt + torch.sqrt(1-alpha_vec) * eps\n return xt, None" }, { "identifier": "DDPM_forward_timesteps", "path": "utils/common.py", "snippet": "def DDPM_forward_timesteps(x0, step, num_frames, scheduler):\n '''larger step -> smaller t -> smaller alphas[t:] -> smaller xt -> smaller x0'''\n\n device = x0.device\n # timesteps are reversed\n timesteps = scheduler.timesteps[len(scheduler.timesteps)-step:]\n t = timesteps[0]\n\n if x0.shape[2] == 1:\n xt = repeat(x0, 'b c 1 h w -> b c f h w', f = num_frames)\n else:\n xt = x0\n noise = torch.randn(xt.shape, dtype=xt.dtype, device=device)\n # t to tensor of batch size \n t = torch.tensor([t]*xt.shape[0], device=device)\n xt = scheduler.add_noise(xt, noise, t)\n return xt, timesteps" }, { "identifier": "DDPM_forward_mask", "path": "utils/common.py", "snippet": "def DDPM_forward_mask(x0, step, num_frames, scheduler, mask):\n '''larger step -> smaller t -> smaller alphas[t:] -> smaller xt -> smaller x0'''\n device = x0.device\n dtype = x0.dtype\n b, c, f, h, w = x0.shape\n\n move_xt, timesteps = DDPM_forward_timesteps(x0, step, num_frames, scheduler)\n mask = T.ToTensor()(mask).to(dtype).to(device)\n mask = T.Resize([h, w], antialias=False)(mask)\n mask = rearrange(mask, 'b h w -> b 1 1 h w')\n freeze_xt = repeat(x0, 'b c 1 h 
w -> b c f h w', f = num_frames)\n initial = freeze_xt * (1-mask) + move_xt * mask\n return initial, timesteps" }, { "identifier": "motion_mask_loss", "path": "utils/common.py", "snippet": "def motion_mask_loss(latents, mask):\n diff = torch.abs(latents[:,:,1:] - latents[:,:,:-1])\n loss = torch.sum(torch.mean(diff * (1-mask), dim=[2,3,4]), dim=1)\n return loss" }, { "identifier": "generate_center_mask", "path": "utils/common.py", "snippet": "def generate_center_mask(image):\n # Create a blank mask with the same size as the image\n b, c , h, w = image.shape\n mask = np.zeros([b, h, w], dtype=np.uint8)\n \n # Generate random coordinates for the mask\n for i in range(b):\n width = int(w/10)\n height = int(h/10)\n mask[i][height:-height,width:-width] = 255\n # Apply the mask to the image\n #masked_image = cv2.bitwise_and(image, image, mask=mask)\n return mask " }, { "identifier": "tensor_to_vae_latent", "path": "utils/common.py", "snippet": "def tensor_to_vae_latent(t, vae):\n video_length = t.shape[1]\n\n t = rearrange(t, \"b f c h w -> (b f) c h w\")\n latents = vae.encode(t).latent_dist.sample()\n latents = rearrange(latents, \"(b f) c h w -> b c f h w\", f=video_length)\n latents = latents * 0.18215\n\n return latents" } ]
import argparse import datetime import logging import inspect import math import os import json import gc import copy import random import cv2 import torch import torch.nn.functional as F import torch.utils.checkpoint import torchvision.transforms as T import diffusers import transformers import numpy as np import imageio import itertools import bitsandbytes as bnb from typing import Dict, Optional, Tuple from omegaconf import OmegaConf from tqdm.auto import tqdm from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers.models import AutoencoderKL from diffusers import DPMSolverMultistepScheduler, DDPMScheduler from diffusers.image_processor import VaeImageProcessor from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, export_to_video from diffusers.utils.import_utils import is_xformers_available from diffusers.models.attention_processor import AttnProcessor2_0, Attention from diffusers.models.attention import BasicTransformerBlock from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth import tensor2vid from transformers import CLIPTextModel, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPEncoder from utils.dataset import VideoJsonDataset, SingleVideoDataset, \ ImageDataset, VideoFolderDataset, CachedDataset, VideoBLIPDataset from einops import rearrange, repeat from models.unet_3d_condition_mask import UNet3DConditionModel from models.pipeline import LatentToVideoPipeline from utils.lora_handler import LoraHandler, LORA_VERSIONS from utils.common import read_mask, generate_random_mask, slerp, calculate_motion_score, \ read_video, calculate_motion_precision, calculate_latent_motion_score, \ DDPM_forward, DDPM_forward_timesteps, DDPM_forward_mask, motion_mask_loss, \ generate_center_mask, tensor_to_vae_latent from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
20006
# Cache latents by storing them in VRAM. # Speeds up training and saves memory by not encoding during the train loop. if not should_cache: return None vae.to('cuda', dtype=torch.float16) vae.enable_slicing() cached_latent_dir = ( os.path.abspath(cached_latent_dir) if cached_latent_dir is not None else None ) if cached_latent_dir is None: cache_save_dir = f"{output_dir}/cached_latents" os.makedirs(cache_save_dir, exist_ok=True) for i, batch in enumerate(tqdm(train_dataloader, desc="Caching Latents.")): save_name = f"cached_{i}" full_out_path = f"{cache_save_dir}/{save_name}.pt" pixel_values = batch['pixel_values'].to('cuda', dtype=torch.float16) batch['pixel_values'] = tensor_to_vae_latent(pixel_values, vae) for k, v in batch.items(): batch[k] = v[0] torch.save(batch, full_out_path) del pixel_values del batch # We do this to avoid fragmentation from casting latents between devices. torch.cuda.empty_cache() else: cache_save_dir = cached_latent_dir return torch.utils.data.DataLoader( CachedDataset(cache_dir=cache_save_dir), batch_size=train_batch_size, shuffle=shuffle, num_workers=0 ) def handle_trainable_modules(model, trainable_modules, not_trainable_modules=[], is_enabled=True, negation=None): global already_printed_trainables # This can most definitely be refactored :-) unfrozen_params = 0 print(f"not trainable {not_trainable_modules}") for name, module in model.named_modules(): check = False for tm in tuple(trainable_modules): if tm == 'all' or (tm in name and 'lora' not in name): check = True break for tm in not_trainable_modules: if tm in name: check = False break if check: for m in module.parameters(): m.requires_grad_(is_enabled) if is_enabled: unfrozen_params +=1 if unfrozen_params > 0 and not already_printed_trainables: already_printed_trainables = True print(f"{unfrozen_params} params have been unfrozen for training.") def sample_noise(latents, noise_strength, use_offset_noise=False): b ,c, f, *_ = latents.shape noise_latents = torch.randn_like(latents, device=latents.device) offset_noise = None if use_offset_noise: offset_noise = torch.randn(b, c, f, 1, 1, device=latents.device) noise_latents = noise_latents + noise_strength * offset_noise return noise_latents def enforce_zero_terminal_snr(betas): """ Corrects noise in diffusion schedulers. From: Common Diffusion Noise Schedules and Sample Steps are Flawed https://arxiv.org/pdf/2305.08891.pdf """ # Convert betas to alphas_bar_sqrt alphas = 1 - betas alphas_bar = alphas.cumprod(0) alphas_bar_sqrt = alphas_bar.sqrt() # Store old values. alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() # Shift so the last timestep is zero. alphas_bar_sqrt -= alphas_bar_sqrt_T # Scale so the first timestep is back to the old value. alphas_bar_sqrt *= alphas_bar_sqrt_0 / ( alphas_bar_sqrt_0 - alphas_bar_sqrt_T ) # Convert alphas_bar_sqrt to betas alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas def should_sample(global_step, validation_steps, validation_data): return (global_step % validation_steps == 0 or global_step == 5) \ and validation_data.sample_preview def save_pipe( path, global_step, accelerator, unet, text_encoder, vae, output_dir,
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] dataset_cls = [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset, VideoBLIPDataset] dataset_map = {d.__getname__(): d for d in dataset_cls} # Loop through all available datasets, get the name, then add to list of data to process. for dataset in dataset_types: if dataset in dataset_map: train_datasets.append(dataset_map[dataset](**train_data, tokenizer=tokenizer)) else: raise ValueError(f"Dataset type not found: {dataset} not in {dataset_map.keys()}") return train_datasets def extend_datasets(datasets, dataset_items, extend=False): biggest_data_len = max(x.__len__() for x in datasets) extended = [] for dataset in datasets: if dataset.__len__() == 0: del dataset continue if dataset.__len__() < biggest_data_len: for item in dataset_items: if extend and item not in extended and hasattr(dataset, item): print(f"Extending {item}") value = getattr(dataset, item) value *= biggest_data_len value = value[:biggest_data_len] setattr(dataset, item, value) print(f"New {item} dataset length: {dataset.__len__()}") extended.append(item) def export_to_video(video_frames, output_video_path, fps): fourcc = cv2.VideoWriter_fourcc(*"mp4v") h, w, _ = video_frames[0].shape video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=fps, frameSize=(w, h)) for i in range(len(video_frames)): img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR) video_writer.write(img) def create_output_folders(output_dir, config): now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") out_dir = os.path.join(output_dir, f"train_{now}") os.makedirs(out_dir, exist_ok=True) os.makedirs(f"{out_dir}/samples", exist_ok=True) OmegaConf.save(config, os.path.join(out_dir, 'config.yaml')) return out_dir def load_primary_models(pretrained_model_path, motion_mask, motion_strength): noise_scheduler = DDPMScheduler.from_pretrained(pretrained_model_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder") vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae") unet = UNet3DConditionModel.from_pretrained(pretrained_model_path, subfolder="unet", low_cpu_mem_usage=False, device_map=None, motion_mask=motion_mask, motion_strength=motion_strength) if pretrained_model_path.endswith('zeroscope_v2_576w'): #first time init, modify unet conv in2 unet.conv_in2.bias.data = copy.deepcopy(unet.conv_in.bias) torch.nn.init.zeros_(unet.conv_in2.weight) unet.conv_in2.weight.data[:,1:]= copy.deepcopy(unet.conv_in.weight) return noise_scheduler, tokenizer, text_encoder, vae, unet def unet_and_text_g_c(unet, text_encoder, unet_enable, text_enable): unet._set_gradient_checkpointing(value=unet_enable) if text_enable: text_encoder.gradient_checkpointing_enable() else: 
text_encoder.gradient_checkpointing_disable() def freeze_models(models_to_freeze): for model in models_to_freeze: if model is not None: model.requires_grad_(False) def is_attn(name): return ('attn1' or 'attn2' == name.split('.')[-1]) def set_processors(attentions): for attn in attentions: attn.set_processor(AttnProcessor2_0()) def set_torch_2_attn(unet): optim_count = 0 for name, module in unet.named_modules(): if is_attn(name): if isinstance(module, torch.nn.ModuleList): for m in module: if isinstance(m, BasicTransformerBlock): set_processors([m.attn1, m.attn2]) optim_count += 1 if optim_count > 0: print(f"{optim_count} Attention layers using Scaled Dot Product Attention.") def handle_memory_attention(enable_xformers_memory_efficient_attention, enable_torch_2_attn, unet): try: is_torch_2 = hasattr(F, 'scaled_dot_product_attention') enable_torch_2 = is_torch_2 and enable_torch_2_attn if enable_xformers_memory_efficient_attention and not enable_torch_2: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) else: raise ValueError("xformers is not available. Make sure it is installed correctly") if enable_torch_2: set_torch_2_attn(unet) except: print("Could not enable memory efficient attention for xformers or Torch 2.0.") def param_optim(model, condition, extra_params=None, is_lora=False, negation=None): extra_params = extra_params if len(extra_params.keys()) > 0 else None return { "model": model, "condition": condition, 'extra_params': extra_params, 'is_lora': is_lora, "negation": negation } def create_optim_params(name='param', params=None, lr=5e-6, extra_params=None): params = { "name": name, "params": params, "lr": lr } if extra_params is not None: for k, v in extra_params.items(): params[k] = v return params def negate_params(name, negation): # We have to do this if we are co-training with LoRA. # This ensures that parameter groups aren't duplicated. if negation is None: return False for n in negation: if n in name and 'temp' not in name: return True return False def create_optimizer_params(model_list, lr): optimizer_params = [] for optim in model_list: model, condition, extra_params, is_lora, negation = optim.values() # Check if we are doing LoRA training. if is_lora and condition and isinstance(model, list): params = create_optim_params( params=itertools.chain(*model), extra_params=extra_params ) optimizer_params.append(params) continue if is_lora and condition and not isinstance(model, list): for n, p in model.named_parameters(): if 'lora' in n: params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) continue # If this is true, we can train it. if condition: for n, p in model.named_parameters(): should_negate = 'lora' in n and not is_lora if should_negate: continue params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) return optimizer_params def get_optimizer(use_8bit_adam): if use_8bit_adam: try: except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. 
You can do so by running `pip install bitsandbytes`" ) return bnb.optim.AdamW8bit else: return torch.optim.AdamW def is_mixed_precision(accelerator): weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 return weight_dtype def cast_to_gpu_and_type(model_list, device, weight_dtype): for model in model_list: if model is not None: model.to(device, dtype=weight_dtype) def handle_cache_latents( should_cache, output_dir, train_dataloader, train_batch_size, vae, cached_latent_dir=None, shuffle=False ): # Cache latents by storing them in VRAM. # Speeds up training and saves memory by not encoding during the train loop. if not should_cache: return None vae.to('cuda', dtype=torch.float16) vae.enable_slicing() cached_latent_dir = ( os.path.abspath(cached_latent_dir) if cached_latent_dir is not None else None ) if cached_latent_dir is None: cache_save_dir = f"{output_dir}/cached_latents" os.makedirs(cache_save_dir, exist_ok=True) for i, batch in enumerate(tqdm(train_dataloader, desc="Caching Latents.")): save_name = f"cached_{i}" full_out_path = f"{cache_save_dir}/{save_name}.pt" pixel_values = batch['pixel_values'].to('cuda', dtype=torch.float16) batch['pixel_values'] = tensor_to_vae_latent(pixel_values, vae) for k, v in batch.items(): batch[k] = v[0] torch.save(batch, full_out_path) del pixel_values del batch # We do this to avoid fragmentation from casting latents between devices. torch.cuda.empty_cache() else: cache_save_dir = cached_latent_dir return torch.utils.data.DataLoader( CachedDataset(cache_dir=cache_save_dir), batch_size=train_batch_size, shuffle=shuffle, num_workers=0 ) def handle_trainable_modules(model, trainable_modules, not_trainable_modules=[], is_enabled=True, negation=None): global already_printed_trainables # This can most definitely be refactored :-) unfrozen_params = 0 print(f"not trainable {not_trainable_modules}") for name, module in model.named_modules(): check = False for tm in tuple(trainable_modules): if tm == 'all' or (tm in name and 'lora' not in name): check = True break for tm in not_trainable_modules: if tm in name: check = False break if check: for m in module.parameters(): m.requires_grad_(is_enabled) if is_enabled: unfrozen_params +=1 if unfrozen_params > 0 and not already_printed_trainables: already_printed_trainables = True print(f"{unfrozen_params} params have been unfrozen for training.") def sample_noise(latents, noise_strength, use_offset_noise=False): b ,c, f, *_ = latents.shape noise_latents = torch.randn_like(latents, device=latents.device) offset_noise = None if use_offset_noise: offset_noise = torch.randn(b, c, f, 1, 1, device=latents.device) noise_latents = noise_latents + noise_strength * offset_noise return noise_latents def enforce_zero_terminal_snr(betas): """ Corrects noise in diffusion schedulers. From: Common Diffusion Noise Schedules and Sample Steps are Flawed https://arxiv.org/pdf/2305.08891.pdf """ # Convert betas to alphas_bar_sqrt alphas = 1 - betas alphas_bar = alphas.cumprod(0) alphas_bar_sqrt = alphas_bar.sqrt() # Store old values. alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() # Shift so the last timestep is zero. alphas_bar_sqrt -= alphas_bar_sqrt_T # Scale so the first timestep is back to the old value. 
alphas_bar_sqrt *= alphas_bar_sqrt_0 / ( alphas_bar_sqrt_0 - alphas_bar_sqrt_T ) # Convert alphas_bar_sqrt to betas alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas def should_sample(global_step, validation_steps, validation_data): return (global_step % validation_steps == 0 or global_step == 5) \ and validation_data.sample_preview def save_pipe( path, global_step, accelerator, unet, text_encoder, vae, output_dir,
lora_manager: LoraHandler,
8
2023-12-07 08:26:29+00:00
24k
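For reference, the zero-terminal-SNR rescaling embedded in the row above can be exercised in isolation. The sketch below assumes only PyTorch; the linear schedule helper and its beta_start/beta_end values are illustrative choices made here, not values taken from the repository in that row.

import torch

def make_linear_betas(num_steps=1000, beta_start=8.5e-4, beta_end=1.2e-2):
    # A toy linear DDPM-style schedule (illustrative values, not from the repo).
    return torch.linspace(beta_start, beta_end, num_steps)

def enforce_zero_terminal_snr(betas):
    # Same procedure as in the snippet above (arXiv:2305.08891): shift and
    # rescale sqrt(alpha_bar) so the final timestep carries zero signal.
    alphas = 1.0 - betas
    alphas_bar_sqrt = alphas.cumprod(0).sqrt()
    first, last = alphas_bar_sqrt[0].clone(), alphas_bar_sqrt[-1].clone()
    alphas_bar_sqrt = (alphas_bar_sqrt - last) * first / (first - last)
    alphas_bar = alphas_bar_sqrt ** 2
    alphas = torch.cat([alphas_bar[0:1], alphas_bar[1:] / alphas_bar[:-1]])
    return 1.0 - alphas

if __name__ == "__main__":
    rescaled = enforce_zero_terminal_snr(make_linear_betas())
    # The cumulative product of the rescaled alphas ends at (numerically) zero,
    # i.e. the terminal timestep now has zero SNR.
    print((1.0 - rescaled).cumprod(0)[-1].item())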
modelscope/richdreamer
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> BaseGeometry:\n return other" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def initialize_shape(self) -> None:\n pass\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # 
points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n \"points_scaled\": points.view(-1, self.cfg.n_input_dims),\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "get_rank", "path": "threestudio/utils/misc.py", "snippet": "def get_rank():\n # SLURM_PROCID can be set even if SLURM is not managing the multiprocessing,\n # therefore LOCAL_RANK needs to be checked first\n rank_keys = (\"RANK\", \"LOCAL_RANK\", \"SLURM_PROCID\", \"JSM_NAMESPACE_RANK\")\n for key in rank_keys:\n rank = 
os.environ.get(key)\n if rank is not None:\n return int(rank)\n return 0" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (BaseExplicitGeometry, BaseGeometry, contract_to_unisphere,)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast, get_rank
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
from pysdf import SDF
15,263
).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 nerf_scale: float = 1.0 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False # sdf_bias: Union[float, str] = 0.0 # sdf_bias_params: Optional[Any] = None cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert 
isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
other: BaseGeometry,
1
2023-12-06 07:53:11+00:00
24k
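The geometry snippets in the row above repeatedly normalise query points with scale_tensor and contract_to_unisphere before feeding them to the hash-grid encoding. The standalone sketch below reproduces only the bounded case with plain PyTorch; it mirrors the names used in the snippets but is an illustration under those assumptions, not the repository implementation.

import torch

def scale_tensor(dat, inp_scale, tgt_scale):
    # Linearly remap `dat` from the (lo, hi) range inp_scale to tgt_scale.
    dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])
    return dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]

def contract_to_unisphere_bounded(x, bbox):
    # Bounded case of contract_to_unisphere: points inside the axis-aligned
    # bbox are mapped to the unit cube (0, 1); no unisphere contraction applied.
    return scale_tensor(x, bbox, (0, 1))

if __name__ == "__main__":
    radius = 1.0
    bbox = torch.tensor([[-radius, -radius, -radius],
                         [radius, radius, radius]], dtype=torch.float32)
    pts = torch.tensor([[-1.0, 0.0, 1.0]])
    print(contract_to_unisphere_bounded(pts, bbox))  # tensor([[0.0000, 0.5000, 1.0000]])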
rehg-lab/RAVE
annotator/oneformer/detectron2/modeling/meta_arch/retinanet.py
[ { "identifier": "configurable", "path": "annotator/oneformer/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\r\n \"\"\"\r\n Decorate a function or a class's __init__ method so that it can be called\r\n with a :class:`CfgNode` object using a :func:`from_config` function that translates\r\n :class:`CfgNode` to arguments.\r\n\r\n Examples:\r\n ::\r\n # Usage 1: Decorator on __init__:\r\n class A:\r\n @configurable\r\n def __init__(self, a, b=2, c=3):\r\n pass\r\n\r\n @classmethod\r\n def from_config(cls, cfg): # 'cfg' must be the first argument\r\n # Returns kwargs to be passed to __init__\r\n return {\"a\": cfg.A, \"b\": cfg.B}\r\n\r\n a1 = A(a=1, b=2) # regular construction\r\n a2 = A(cfg) # construct with a cfg\r\n a3 = A(cfg, b=3, c=4) # construct with extra overwrite\r\n\r\n # Usage 2: Decorator on any function. Needs an extra from_config argument:\r\n @configurable(from_config=lambda cfg: {\"a: cfg.A, \"b\": cfg.B})\r\n def a_func(a, b=2, c=3):\r\n pass\r\n\r\n a1 = a_func(a=1, b=2) # regular call\r\n a2 = a_func(cfg) # call with a cfg\r\n a3 = a_func(cfg, b=3, c=4) # call with extra overwrite\r\n\r\n Args:\r\n init_func (callable): a class's ``__init__`` method in usage 1. The\r\n class must have a ``from_config`` classmethod which takes `cfg` as\r\n the first argument.\r\n from_config (callable): the from_config function in usage 2. It must take `cfg`\r\n as its first argument.\r\n \"\"\"\r\n\r\n if init_func is not None:\r\n assert (\r\n inspect.isfunction(init_func)\r\n and from_config is None\r\n and init_func.__name__ == \"__init__\"\r\n ), \"Incorrect use of @configurable. Check API documentation for examples.\"\r\n\r\n @functools.wraps(init_func)\r\n def wrapped(self, *args, **kwargs):\r\n try:\r\n from_config_func = type(self).from_config\r\n except AttributeError as e:\r\n raise AttributeError(\r\n \"Class with @configurable must have a 'from_config' classmethod.\"\r\n ) from e\r\n if not inspect.ismethod(from_config_func):\r\n raise TypeError(\"Class with @configurable must have a 'from_config' classmethod.\")\r\n\r\n if _called_with_cfg(*args, **kwargs):\r\n explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)\r\n init_func(self, **explicit_args)\r\n else:\r\n init_func(self, *args, **kwargs)\r\n\r\n return wrapped\r\n\r\n else:\r\n if from_config is None:\r\n return configurable # @configurable() is made equivalent to @configurable\r\n assert inspect.isfunction(\r\n from_config\r\n ), \"from_config argument of configurable must be a function!\"\r\n\r\n def wrapper(orig_func):\r\n @functools.wraps(orig_func)\r\n def wrapped(*args, **kwargs):\r\n if _called_with_cfg(*args, **kwargs):\r\n explicit_args = _get_args_from_config(from_config, *args, **kwargs)\r\n return orig_func(**explicit_args)\r\n else:\r\n return orig_func(*args, **kwargs)\r\n\r\n wrapped.from_config = from_config\r\n return wrapped\r\n\r\n return wrapper\r" }, { "identifier": "get_norm", "path": "annotator/oneformer/detectron2/layers/batch_norm.py", "snippet": "def get_norm(norm, out_channels):\r\n \"\"\"\r\n Args:\r\n norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;\r\n or a callable that takes a channel number and returns\r\n the normalization layer as a nn.Module.\r\n\r\n Returns:\r\n nn.Module or None: the normalization layer\r\n \"\"\"\r\n if norm is None:\r\n return None\r\n if isinstance(norm, str):\r\n if len(norm) == 0:\r\n return None\r\n norm = {\r\n \"BN\": BatchNorm2d,\r\n # Fixed in 
https://github.com/pytorch/pytorch/pull/36382\r\n \"SyncBN\": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,\r\n \"FrozenBN\": FrozenBatchNorm2d,\r\n \"GN\": lambda channels: nn.GroupNorm(32, channels),\r\n # for debugging:\r\n \"nnSyncBN\": nn.SyncBatchNorm,\r\n \"naiveSyncBN\": NaiveSyncBatchNorm,\r\n # expose stats_mode N as an option to caller, required for zero-len inputs\r\n \"naiveSyncBN_N\": lambda channels: NaiveSyncBatchNorm(channels, stats_mode=\"N\"),\r\n \"LN\": lambda channels: LayerNorm(channels),\r\n }[norm]\r\n return norm(out_channels)\r" }, { "identifier": "CycleBatchNormList", "path": "annotator/oneformer/detectron2/layers/batch_norm.py", "snippet": "class CycleBatchNormList(nn.ModuleList):\r\n \"\"\"\r\n Implement domain-specific BatchNorm by cycling.\r\n\r\n When a BatchNorm layer is used for multiple input domains or input\r\n features, it might need to maintain a separate test-time statistics\r\n for each domain. See Sec 5.2 in :paper:`rethinking-batchnorm`.\r\n\r\n This module implements it by using N separate BN layers\r\n and it cycles through them every time a forward() is called.\r\n\r\n NOTE: The caller of this module MUST guarantee to always call\r\n this module by multiple of N times. Otherwise its test-time statistics\r\n will be incorrect.\r\n \"\"\"\r\n\r\n def __init__(self, length: int, bn_class=nn.BatchNorm2d, **kwargs):\r\n \"\"\"\r\n Args:\r\n length: number of BatchNorm layers to cycle.\r\n bn_class: the BatchNorm class to use\r\n kwargs: arguments of the BatchNorm class, such as num_features.\r\n \"\"\"\r\n self._affine = kwargs.pop(\"affine\", True)\r\n super().__init__([bn_class(**kwargs, affine=False) for k in range(length)])\r\n if self._affine:\r\n # shared affine, domain-specific BN\r\n channels = self[0].num_features\r\n self.weight = nn.Parameter(torch.ones(channels))\r\n self.bias = nn.Parameter(torch.zeros(channels))\r\n self._pos = 0\r\n\r\n def forward(self, x):\r\n ret = self[self._pos](x)\r\n self._pos = (self._pos + 1) % len(self)\r\n\r\n if self._affine:\r\n w = self.weight.reshape(1, -1, 1, 1)\r\n b = self.bias.reshape(1, -1, 1, 1)\r\n return ret * w + b\r\n else:\r\n return ret\r\n\r\n def extra_repr(self):\r\n return f\"affine={self._affine}\"\r" }, { "identifier": "batched_nms", "path": "annotator/oneformer/detectron2/layers/nms.py", "snippet": "def batched_nms(\r\n boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float\r\n):\r\n \"\"\"\r\n Same as torchvision.ops.boxes.batched_nms, but with float().\r\n \"\"\"\r\n assert boxes.shape[-1] == 4\r\n # Note: Torchvision already has a strategy (https://github.com/pytorch/vision/issues/1311)\r\n # to decide whether to use coordinate trick or for loop to implement batched_nms. 
So we\r\n # just call it directly.\r\n # Fp16 does not have enough range for batched NMS, so adding float().\r\n return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold)\r" }, { "identifier": "ShapeSpec", "path": "annotator/oneformer/detectron2/layers/shape_spec.py", "snippet": "class ShapeSpec:\r\n \"\"\"\r\n A simple structure that contains basic shape specification about a tensor.\r\n It is often used as the auxiliary inputs/outputs of models,\r\n to complement the lack of shape inference ability among pytorch modules.\r\n \"\"\"\r\n\r\n channels: Optional[int] = None\r\n height: Optional[int] = None\r\n width: Optional[int] = None\r\n stride: Optional[int] = None\r" }, { "identifier": "cat", "path": "annotator/oneformer/detectron2/layers/wrappers.py", "snippet": "def cat(tensors: List[torch.Tensor], dim: int = 0):\r\n \"\"\"\r\n Efficient version of torch.cat that avoids a copy if there is only a single element in a list\r\n \"\"\"\r\n assert isinstance(tensors, (list, tuple))\r\n if len(tensors) == 1:\r\n return tensors[0]\r\n return torch.cat(tensors, dim)\r" }, { "identifier": "Boxes", "path": "annotator/oneformer/detectron2/structures/boxes.py", "snippet": "class Boxes:\r\n \"\"\"\r\n This structure stores a list of boxes as a Nx4 torch.Tensor.\r\n It supports some common methods about boxes\r\n (`area`, `clip`, `nonempty`, etc),\r\n and also behaves like a Tensor\r\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\r\n\r\n Attributes:\r\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\r\n \"\"\"\r\n\r\n def __init__(self, tensor: torch.Tensor):\r\n \"\"\"\r\n Args:\r\n tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).\r\n \"\"\"\r\n if not isinstance(tensor, torch.Tensor):\r\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=torch.device(\"cpu\"))\r\n else:\r\n tensor = tensor.to(torch.float32)\r\n if tensor.numel() == 0:\r\n # Use reshape, so we don't end up creating a new tensor that does not depend on\r\n # the inputs (and consequently confuses jit)\r\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32)\r\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\r\n\r\n self.tensor = tensor\r\n\r\n def clone(self) -> \"Boxes\":\r\n \"\"\"\r\n Clone the Boxes.\r\n\r\n Returns:\r\n Boxes\r\n \"\"\"\r\n return Boxes(self.tensor.clone())\r\n\r\n def to(self, device: torch.device):\r\n # Boxes are assumed float32 and does not support to(dtype)\r\n return Boxes(self.tensor.to(device=device))\r\n\r\n def area(self) -> torch.Tensor:\r\n \"\"\"\r\n Computes the area of all the boxes.\r\n\r\n Returns:\r\n torch.Tensor: a vector with areas of each box.\r\n \"\"\"\r\n box = self.tensor\r\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\r\n return area\r\n\r\n def clip(self, box_size: Tuple[int, int]) -> None:\r\n \"\"\"\r\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\r\n and y coordinates to the range [0, height].\r\n\r\n Args:\r\n box_size (height, width): The clipping box's size.\r\n \"\"\"\r\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\r\n h, w = box_size\r\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\r\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\r\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\r\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\r\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\r\n\r\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\r\n \"\"\"\r\n Find boxes that are 
non-empty.\r\n A box is considered empty, if either of its side is no larger than threshold.\r\n\r\n Returns:\r\n Tensor:\r\n a binary vector which represents whether each box is empty\r\n (False) or non-empty (True).\r\n \"\"\"\r\n box = self.tensor\r\n widths = box[:, 2] - box[:, 0]\r\n heights = box[:, 3] - box[:, 1]\r\n keep = (widths > threshold) & (heights > threshold)\r\n return keep\r\n\r\n def __getitem__(self, item) -> \"Boxes\":\r\n \"\"\"\r\n Args:\r\n item: int, slice, or a BoolTensor\r\n\r\n Returns:\r\n Boxes: Create a new :class:`Boxes` by indexing.\r\n\r\n The following usage are allowed:\r\n\r\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\r\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\r\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\r\n with `length = len(boxes)`. Nonzero elements in the vector will be selected.\r\n\r\n Note that the returned Boxes might share storage with this Boxes,\r\n subject to Pytorch's indexing semantics.\r\n \"\"\"\r\n if isinstance(item, int):\r\n return Boxes(self.tensor[item].view(1, -1))\r\n b = self.tensor[item]\r\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\r\n return Boxes(b)\r\n\r\n def __len__(self) -> int:\r\n return self.tensor.shape[0]\r\n\r\n def __repr__(self) -> str:\r\n return \"Boxes(\" + str(self.tensor) + \")\"\r\n\r\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\r\n \"\"\"\r\n Args:\r\n box_size (height, width): Size of the reference box.\r\n boundary_threshold (int): Boxes that extend beyond the reference box\r\n boundary by more than boundary_threshold are considered \"outside\".\r\n\r\n Returns:\r\n a binary vector, indicating whether each box is inside the reference box.\r\n \"\"\"\r\n height, width = box_size\r\n inds_inside = (\r\n (self.tensor[..., 0] >= -boundary_threshold)\r\n & (self.tensor[..., 1] >= -boundary_threshold)\r\n & (self.tensor[..., 2] < width + boundary_threshold)\r\n & (self.tensor[..., 3] < height + boundary_threshold)\r\n )\r\n return inds_inside\r\n\r\n def get_centers(self) -> torch.Tensor:\r\n \"\"\"\r\n Returns:\r\n The box centers in a Nx2 array of (x, y).\r\n \"\"\"\r\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\r\n\r\n def scale(self, scale_x: float, scale_y: float) -> None:\r\n \"\"\"\r\n Scale the box with horizontal and vertical scaling factors\r\n \"\"\"\r\n self.tensor[:, 0::2] *= scale_x\r\n self.tensor[:, 1::2] *= scale_y\r\n\r\n @classmethod\r\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\r\n \"\"\"\r\n Concatenates a list of Boxes into a single Boxes\r\n\r\n Arguments:\r\n boxes_list (list[Boxes])\r\n\r\n Returns:\r\n Boxes: the concatenated Boxes\r\n \"\"\"\r\n assert isinstance(boxes_list, (list, tuple))\r\n if len(boxes_list) == 0:\r\n return cls(torch.empty(0))\r\n assert all([isinstance(box, Boxes) for box in boxes_list])\r\n\r\n # use torch.cat (v.s. 
layers.cat) so the returned boxes never share storage with input\r\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\r\n return cat_boxes\r\n\r\n @property\r\n def device(self) -> device:\r\n return self.tensor.device\r\n\r\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\r\n # https://github.com/pytorch/pytorch/issues/18627\r\n @torch.jit.unused\r\n def __iter__(self):\r\n \"\"\"\r\n Yield a box as a Tensor of shape (4,) at a time.\r\n \"\"\"\r\n yield from self.tensor\r" }, { "identifier": "pairwise_iou", "path": "annotator/oneformer/detectron2/structures/boxes.py", "snippet": "def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\r\n \"\"\"\r\n Given two lists of boxes of size N and M, compute the IoU\r\n (intersection over union) between **all** N x M pairs of boxes.\r\n The box order must be (xmin, ymin, xmax, ymax).\r\n\r\n Args:\r\n boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.\r\n\r\n Returns:\r\n Tensor: IoU, sized [N,M].\r\n \"\"\"\r\n area1 = boxes1.area() # [N]\r\n area2 = boxes2.area() # [M]\r\n inter = pairwise_intersection(boxes1, boxes2)\r\n\r\n # handle empty boxes\r\n iou = torch.where(\r\n inter > 0,\r\n inter / (area1[:, None] + area2 - inter),\r\n torch.zeros(1, dtype=inter.dtype, device=inter.device),\r\n )\r\n return iou\r" }, { "identifier": "ImageList", "path": "annotator/oneformer/detectron2/structures/image_list.py", "snippet": "class ImageList(object):\r\n \"\"\"\r\n Structure that holds a list of images (of possibly\r\n varying sizes) as a single tensor.\r\n This works by padding the images to the same size.\r\n The original sizes of each image is stored in `image_sizes`.\r\n\r\n Attributes:\r\n image_sizes (list[tuple[int, int]]): each tuple is (h, w).\r\n During tracing, it becomes list[Tensor] instead.\r\n \"\"\"\r\n\r\n def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):\r\n \"\"\"\r\n Arguments:\r\n tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1\r\n image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can\r\n be smaller than (H, W) due to padding.\r\n \"\"\"\r\n self.tensor = tensor\r\n self.image_sizes = image_sizes\r\n\r\n def __len__(self) -> int:\r\n return len(self.image_sizes)\r\n\r\n def __getitem__(self, idx) -> torch.Tensor:\r\n \"\"\"\r\n Access the individual image in its original size.\r\n\r\n Args:\r\n idx: int or slice\r\n\r\n Returns:\r\n Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1\r\n \"\"\"\r\n size = self.image_sizes[idx]\r\n return self.tensor[idx, ..., : size[0], : size[1]]\r\n\r\n @torch.jit.unused\r\n def to(self, *args: Any, **kwargs: Any) -> \"ImageList\":\r\n cast_tensor = self.tensor.to(*args, **kwargs)\r\n return ImageList(cast_tensor, self.image_sizes)\r\n\r\n @property\r\n def device(self) -> device:\r\n return self.tensor.device\r\n\r\n @staticmethod\r\n def from_tensors(\r\n tensors: List[torch.Tensor],\r\n size_divisibility: int = 0,\r\n pad_value: float = 0.0,\r\n padding_constraints: Optional[Dict[str, int]] = None,\r\n ) -> \"ImageList\":\r\n \"\"\"\r\n Args:\r\n tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or\r\n (C_1, ..., C_K, Hi, Wi) where K >= 1. 
The Tensors will be padded\r\n to the same shape with `pad_value`.\r\n size_divisibility (int): If `size_divisibility > 0`, add padding to ensure\r\n the common height and width is divisible by `size_divisibility`.\r\n This depends on the model and many models need a divisibility of 32.\r\n pad_value (float): value to pad.\r\n padding_constraints (optional[Dict]): If given, it would follow the format as\r\n {\"size_divisibility\": int, \"square_size\": int}, where `size_divisibility` will\r\n overwrite the above one if presented and `square_size` indicates the\r\n square padding size if `square_size` > 0.\r\n Returns:\r\n an `ImageList`.\r\n \"\"\"\r\n assert len(tensors) > 0\r\n assert isinstance(tensors, (tuple, list))\r\n for t in tensors:\r\n assert isinstance(t, torch.Tensor), type(t)\r\n assert t.shape[:-2] == tensors[0].shape[:-2], t.shape\r\n\r\n image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]\r\n image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes]\r\n max_size = torch.stack(image_sizes_tensor).max(0).values\r\n\r\n if padding_constraints is not None:\r\n square_size = padding_constraints.get(\"square_size\", 0)\r\n if square_size > 0:\r\n # pad to square.\r\n max_size[0] = max_size[1] = square_size\r\n if \"size_divisibility\" in padding_constraints:\r\n size_divisibility = padding_constraints[\"size_divisibility\"]\r\n if size_divisibility > 1:\r\n stride = size_divisibility\r\n # the last two dims are H,W, both subject to divisibility requirement\r\n max_size = (max_size + (stride - 1)).div(stride, rounding_mode=\"floor\") * stride\r\n\r\n # handle weirdness of scripting and tracing ...\r\n if torch.jit.is_scripting():\r\n max_size: List[int] = max_size.to(dtype=torch.long).tolist()\r\n else:\r\n if torch.jit.is_tracing():\r\n image_sizes = image_sizes_tensor\r\n\r\n if len(tensors) == 1:\r\n # This seems slightly (2%) faster.\r\n # TODO: check whether it's faster for multiple images as well\r\n image_size = image_sizes[0]\r\n padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]\r\n batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)\r\n else:\r\n # max_size can be a tensor in tracing mode, therefore convert to list\r\n batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)\r\n device = (\r\n None if torch.jit.is_scripting() else (\"cpu\" if torch.jit.is_tracing() else None)\r\n )\r\n batched_imgs = tensors[0].new_full(batch_shape, pad_value, device=device)\r\n batched_imgs = move_device_like(batched_imgs, tensors[0])\r\n for i, img in enumerate(tensors):\r\n # Use `batched_imgs` directly instead of `img, pad_img = zip(tensors, batched_imgs)`\r\n # Tracing mode cannot capture `copy_()` of temporary locals\r\n batched_imgs[i, ..., : img.shape[-2], : img.shape[-1]].copy_(img)\r\n\r\n return ImageList(batched_imgs.contiguous(), image_sizes)\r" }, { "identifier": "Instances", "path": "annotator/oneformer/detectron2/structures/instances.py", "snippet": "class Instances:\r\n \"\"\"\r\n This class represents a list of instances in an image.\r\n It stores the attributes of instances (e.g., boxes, masks, labels, scores) as \"fields\".\r\n All fields must have the same ``__len__`` which is the number of instances.\r\n\r\n All other (non-field) attributes of this class are considered private:\r\n they must start with '_' and are not modifiable by a user.\r\n\r\n Some basic usage:\r\n\r\n 1. Set/get/check a field:\r\n\r\n .. 
code-block:: python\r\n\r\n instances.gt_boxes = Boxes(...)\r\n print(instances.pred_masks) # a tensor of shape (N, H, W)\r\n print('gt_masks' in instances)\r\n\r\n 2. ``len(instances)`` returns the number of instances\r\n 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields\r\n and returns a new :class:`Instances`.\r\n Typically, ``indices`` is a integer vector of indices,\r\n or a binary mask of length ``num_instances``\r\n\r\n .. code-block:: python\r\n\r\n category_3_detections = instances[instances.pred_classes == 3]\r\n confident_detections = instances[instances.scores > 0.9]\r\n \"\"\"\r\n\r\n def __init__(self, image_size: Tuple[int, int], **kwargs: Any):\r\n \"\"\"\r\n Args:\r\n image_size (height, width): the spatial size of the image.\r\n kwargs: fields to add to this `Instances`.\r\n \"\"\"\r\n self._image_size = image_size\r\n self._fields: Dict[str, Any] = {}\r\n for k, v in kwargs.items():\r\n self.set(k, v)\r\n\r\n @property\r\n def image_size(self) -> Tuple[int, int]:\r\n \"\"\"\r\n Returns:\r\n tuple: height, width\r\n \"\"\"\r\n return self._image_size\r\n\r\n def __setattr__(self, name: str, val: Any) -> None:\r\n if name.startswith(\"_\"):\r\n super().__setattr__(name, val)\r\n else:\r\n self.set(name, val)\r\n\r\n def __getattr__(self, name: str) -> Any:\r\n if name == \"_fields\" or name not in self._fields:\r\n raise AttributeError(\"Cannot find field '{}' in the given Instances!\".format(name))\r\n return self._fields[name]\r\n\r\n def set(self, name: str, value: Any) -> None:\r\n \"\"\"\r\n Set the field named `name` to `value`.\r\n The length of `value` must be the number of instances,\r\n and must agree with other existing fields in this object.\r\n \"\"\"\r\n with warnings.catch_warnings(record=True):\r\n data_len = len(value)\r\n if len(self._fields):\r\n assert (\r\n len(self) == data_len\r\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\r\n self._fields[name] = value\r\n\r\n def has(self, name: str) -> bool:\r\n \"\"\"\r\n Returns:\r\n bool: whether the field called `name` exists.\r\n \"\"\"\r\n return name in self._fields\r\n\r\n def remove(self, name: str) -> None:\r\n \"\"\"\r\n Remove the field called `name`.\r\n \"\"\"\r\n del self._fields[name]\r\n\r\n def get(self, name: str) -> Any:\r\n \"\"\"\r\n Returns the field called `name`.\r\n \"\"\"\r\n return self._fields[name]\r\n\r\n def get_fields(self) -> Dict[str, Any]:\r\n \"\"\"\r\n Returns:\r\n dict: a dict which maps names (str) to data of the fields\r\n\r\n Modifying the returned dict will modify this instance.\r\n \"\"\"\r\n return self._fields\r\n\r\n # Tensor-like methods\r\n def to(self, *args: Any, **kwargs: Any) -> \"Instances\":\r\n \"\"\"\r\n Returns:\r\n Instances: all fields are called with a `to(device)`, if the field has this method.\r\n \"\"\"\r\n ret = Instances(self._image_size)\r\n for k, v in self._fields.items():\r\n if hasattr(v, \"to\"):\r\n v = v.to(*args, **kwargs)\r\n ret.set(k, v)\r\n return ret\r\n\r\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Instances\":\r\n \"\"\"\r\n Args:\r\n item: an index-like object and will be used to index all the fields.\r\n\r\n Returns:\r\n If `item` is a string, return the data in the corresponding field.\r\n Otherwise, returns an `Instances` where all fields are indexed by `item`.\r\n \"\"\"\r\n if type(item) == int:\r\n if item >= len(self) or item < -len(self):\r\n raise IndexError(\"Instances index out of range!\")\r\n else:\r\n item = 
slice(item, None, len(self))\r\n\r\n ret = Instances(self._image_size)\r\n for k, v in self._fields.items():\r\n ret.set(k, v[item])\r\n return ret\r\n\r\n def __len__(self) -> int:\r\n for v in self._fields.values():\r\n # use __len__ because len() has to be int and is not friendly to tracing\r\n return v.__len__()\r\n raise NotImplementedError(\"Empty Instances does not support __len__!\")\r\n\r\n def __iter__(self):\r\n raise NotImplementedError(\"`Instances` object is not iterable!\")\r\n\r\n @staticmethod\r\n def cat(instance_lists: List[\"Instances\"]) -> \"Instances\":\r\n \"\"\"\r\n Args:\r\n instance_lists (list[Instances])\r\n\r\n Returns:\r\n Instances\r\n \"\"\"\r\n assert all(isinstance(i, Instances) for i in instance_lists)\r\n assert len(instance_lists) > 0\r\n if len(instance_lists) == 1:\r\n return instance_lists[0]\r\n\r\n image_size = instance_lists[0].image_size\r\n if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing\r\n for i in instance_lists[1:]:\r\n assert i.image_size == image_size\r\n ret = Instances(image_size)\r\n for k in instance_lists[0]._fields.keys():\r\n values = [i.get(k) for i in instance_lists]\r\n v0 = values[0]\r\n if isinstance(v0, torch.Tensor):\r\n values = torch.cat(values, dim=0)\r\n elif isinstance(v0, list):\r\n values = list(itertools.chain(*values))\r\n elif hasattr(type(v0), \"cat\"):\r\n values = type(v0).cat(values)\r\n else:\r\n raise ValueError(\"Unsupported type {} for concatenation\".format(type(v0)))\r\n ret.set(k, values)\r\n return ret\r\n\r\n def __str__(self) -> str:\r\n s = self.__class__.__name__ + \"(\"\r\n s += \"num_instances={}, \".format(len(self))\r\n s += \"image_height={}, \".format(self._image_size[0])\r\n s += \"image_width={}, \".format(self._image_size[1])\r\n s += \"fields=[{}])\".format(\", \".join((f\"{k}: {v}\" for k, v in self._fields.items())))\r\n return s\r\n\r\n __repr__ = __str__\r" }, { "identifier": "get_event_storage", "path": "annotator/oneformer/detectron2/utils/events.py", "snippet": "def get_event_storage():\r\n \"\"\"\r\n Returns:\r\n The :class:`EventStorage` object that's currently being used.\r\n Throws an error if no :class:`EventStorage` is currently enabled.\r\n \"\"\"\r\n assert len(\r\n _CURRENT_STORAGE_STACK\r\n ), \"get_event_storage() has to be called inside a 'with EventStorage(...)' context!\"\r\n return _CURRENT_STORAGE_STACK[-1]\r" }, { "identifier": "build_anchor_generator", "path": "annotator/oneformer/detectron2/modeling/anchor_generator.py", "snippet": "def build_anchor_generator(cfg, input_shape):\r\n \"\"\"\r\n Built an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`.\r\n \"\"\"\r\n anchor_generator = cfg.MODEL.ANCHOR_GENERATOR.NAME\r\n return ANCHOR_GENERATOR_REGISTRY.get(anchor_generator)(cfg, input_shape)\r" }, { "identifier": "build_backbone", "path": "annotator/oneformer/detectron2/modeling/backbone/build.py", "snippet": "def build_backbone(cfg, input_shape=None):\r\n \"\"\"\r\n Build a backbone from `cfg.MODEL.BACKBONE.NAME`.\r\n\r\n Returns:\r\n an instance of :class:`Backbone`\r\n \"\"\"\r\n if input_shape is None:\r\n input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))\r\n\r\n backbone_name = cfg.MODEL.BACKBONE.NAME\r\n backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape)\r\n assert isinstance(backbone, Backbone)\r\n return backbone\r" }, { "identifier": "Backbone", "path": "annotator/oneformer/detectron2/modeling/backbone/backbone.py", "snippet": "class Backbone(nn.Module, metaclass=ABCMeta):\r\n \"\"\"\r\n Abstract 
base class for network backbones.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n The `__init__` method of any subclass can specify its own set of arguments.\r\n \"\"\"\r\n super().__init__()\r\n\r\n @abstractmethod\r\n def forward(self):\r\n \"\"\"\r\n Subclasses must override this method, but adhere to the same return type.\r\n\r\n Returns:\r\n dict[str->Tensor]: mapping from feature name (e.g., \"res2\") to tensor\r\n \"\"\"\r\n pass\r\n\r\n @property\r\n def size_divisibility(self) -> int:\r\n \"\"\"\r\n Some backbones require the input height and width to be divisible by a\r\n specific integer. This is typically true for encoder / decoder type networks\r\n with lateral connection (e.g., FPN) for which feature maps need to match\r\n dimension in the \"bottom up\" and \"top down\" paths. Set to 0 if no specific\r\n input size divisibility is required.\r\n \"\"\"\r\n return 0\r\n\r\n @property\r\n def padding_constraints(self) -> Dict[str, int]:\r\n \"\"\"\r\n This property is a generalization of size_divisibility. Some backbones and training\r\n recipes require specific padding constraints, such as enforcing divisibility by a specific\r\n integer (e.g., FPN) or padding to a square (e.g., ViTDet with large-scale jitter\r\n in :paper:vitdet). `padding_constraints` contains these optional items like:\r\n {\r\n \"size_divisibility\": int,\r\n \"square_size\": int,\r\n # Future options are possible\r\n }\r\n `size_divisibility` will read from here if presented and `square_size` indicates the\r\n square padding size if `square_size` > 0.\r\n\r\n TODO: use type of Dict[str, int] to avoid torchscipt issues. The type of padding_constraints\r\n could be generalized as TypedDict (Python 3.8+) to support more types in the future.\r\n \"\"\"\r\n return {}\r\n\r\n def output_shape(self):\r\n \"\"\"\r\n Returns:\r\n dict[str->ShapeSpec]\r\n \"\"\"\r\n # this is a backward-compatible default\r\n return {\r\n name: ShapeSpec(\r\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\r\n )\r\n for name in self._out_features\r\n }\r" }, { "identifier": "Box2BoxTransform", "path": "annotator/oneformer/detectron2/modeling/box_regression.py", "snippet": "class Box2BoxTransform(object):\r\n \"\"\"\r\n The box-to-box transform defined in R-CNN. The transformation is parameterized\r\n by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height\r\n by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height).\r\n \"\"\"\r\n\r\n def __init__(\r\n self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP\r\n ):\r\n \"\"\"\r\n Args:\r\n weights (4-element tuple): Scaling factors that are applied to the\r\n (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set\r\n such that the deltas have unit variance; now they are treated as\r\n hyperparameters of the system.\r\n scale_clamp (float): When predicting deltas, the predicted box scaling\r\n factors (dw and dh) are clamped such that they are <= scale_clamp.\r\n \"\"\"\r\n self.weights = weights\r\n self.scale_clamp = scale_clamp\r\n\r\n def get_deltas(self, src_boxes, target_boxes):\r\n \"\"\"\r\n Get box regression transformation deltas (dx, dy, dw, dh) that can be used\r\n to transform the `src_boxes` into the `target_boxes`. 
That is, the relation\r\n ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless\r\n any delta is too large and is clamped).\r\n\r\n Args:\r\n src_boxes (Tensor): source boxes, e.g., object proposals\r\n target_boxes (Tensor): target of the transformation, e.g., ground-truth\r\n boxes.\r\n \"\"\"\r\n assert isinstance(src_boxes, torch.Tensor), type(src_boxes)\r\n assert isinstance(target_boxes, torch.Tensor), type(target_boxes)\r\n\r\n src_widths = src_boxes[:, 2] - src_boxes[:, 0]\r\n src_heights = src_boxes[:, 3] - src_boxes[:, 1]\r\n src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths\r\n src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights\r\n\r\n target_widths = target_boxes[:, 2] - target_boxes[:, 0]\r\n target_heights = target_boxes[:, 3] - target_boxes[:, 1]\r\n target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths\r\n target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights\r\n\r\n wx, wy, ww, wh = self.weights\r\n dx = wx * (target_ctr_x - src_ctr_x) / src_widths\r\n dy = wy * (target_ctr_y - src_ctr_y) / src_heights\r\n dw = ww * torch.log(target_widths / src_widths)\r\n dh = wh * torch.log(target_heights / src_heights)\r\n\r\n deltas = torch.stack((dx, dy, dw, dh), dim=1)\r\n assert (src_widths > 0).all().item(), \"Input boxes to Box2BoxTransform are not valid!\"\r\n return deltas\r\n\r\n def apply_deltas(self, deltas, boxes):\r\n \"\"\"\r\n Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.\r\n\r\n Args:\r\n deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.\r\n deltas[i] represents k potentially different class-specific\r\n box transformations for the single box boxes[i].\r\n boxes (Tensor): boxes to transform, of shape (N, 4)\r\n \"\"\"\r\n deltas = deltas.float() # ensure fp32 for decoding precision\r\n boxes = boxes.to(deltas.dtype)\r\n\r\n widths = boxes[:, 2] - boxes[:, 0]\r\n heights = boxes[:, 3] - boxes[:, 1]\r\n ctr_x = boxes[:, 0] + 0.5 * widths\r\n ctr_y = boxes[:, 1] + 0.5 * heights\r\n\r\n wx, wy, ww, wh = self.weights\r\n dx = deltas[:, 0::4] / wx\r\n dy = deltas[:, 1::4] / wy\r\n dw = deltas[:, 2::4] / ww\r\n dh = deltas[:, 3::4] / wh\r\n\r\n # Prevent sending too large values into torch.exp()\r\n dw = torch.clamp(dw, max=self.scale_clamp)\r\n dh = torch.clamp(dh, max=self.scale_clamp)\r\n\r\n pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]\r\n pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]\r\n pred_w = torch.exp(dw) * widths[:, None]\r\n pred_h = torch.exp(dh) * heights[:, None]\r\n\r\n x1 = pred_ctr_x - 0.5 * pred_w\r\n y1 = pred_ctr_y - 0.5 * pred_h\r\n x2 = pred_ctr_x + 0.5 * pred_w\r\n y2 = pred_ctr_y + 0.5 * pred_h\r\n pred_boxes = torch.stack((x1, y1, x2, y2), dim=-1)\r\n return pred_boxes.reshape(deltas.shape)\r" }, { "identifier": "_dense_box_regression_loss", "path": "annotator/oneformer/detectron2/modeling/box_regression.py", "snippet": "def _dense_box_regression_loss(\r\n anchors: List[Union[Boxes, torch.Tensor]],\r\n box2box_transform: Box2BoxTransform,\r\n pred_anchor_deltas: List[torch.Tensor],\r\n gt_boxes: List[torch.Tensor],\r\n fg_mask: torch.Tensor,\r\n box_reg_loss_type=\"smooth_l1\",\r\n smooth_l1_beta=0.0,\r\n):\r\n \"\"\"\r\n Compute loss for dense multi-level box regression.\r\n Loss is accumulated over ``fg_mask``.\r\n\r\n Args:\r\n anchors: #lvl anchor boxes, each is (HixWixA, 4)\r\n pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4)\r\n gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A))\r\n fg_mask: the foreground boolean mask of shape (N, R) to 
compute loss on\r\n box_reg_loss_type (str): Loss type to use. Supported losses: \"smooth_l1\", \"giou\",\r\n \"diou\", \"ciou\".\r\n smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to\r\n use L1 loss. Only used when `box_reg_loss_type` is \"smooth_l1\"\r\n \"\"\"\r\n if isinstance(anchors[0], Boxes):\r\n anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)\r\n else:\r\n anchors = cat(anchors)\r\n if box_reg_loss_type == \"smooth_l1\":\r\n gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes]\r\n gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)\r\n loss_box_reg = smooth_l1_loss(\r\n cat(pred_anchor_deltas, dim=1)[fg_mask],\r\n gt_anchor_deltas[fg_mask],\r\n beta=smooth_l1_beta,\r\n reduction=\"sum\",\r\n )\r\n elif box_reg_loss_type == \"giou\":\r\n pred_boxes = [\r\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\r\n ]\r\n loss_box_reg = giou_loss(\r\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\r\n )\r\n elif box_reg_loss_type == \"diou\":\r\n pred_boxes = [\r\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\r\n ]\r\n loss_box_reg = diou_loss(\r\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\r\n )\r\n elif box_reg_loss_type == \"ciou\":\r\n pred_boxes = [\r\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\r\n ]\r\n loss_box_reg = ciou_loss(\r\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\r\n )\r\n else:\r\n raise ValueError(f\"Invalid dense box regression loss type '{box_reg_loss_type}'\")\r\n return loss_box_reg\r" }, { "identifier": "Matcher", "path": "annotator/oneformer/detectron2/modeling/matcher.py", "snippet": "class Matcher(object):\r\n \"\"\"\r\n This class assigns to each predicted \"element\" (e.g., a box) a ground-truth\r\n element. Each predicted element will have exactly zero or one matches; each\r\n ground-truth element may be matched to zero or more predicted elements.\r\n\r\n The matching is determined by the MxN match_quality_matrix, that characterizes\r\n how well each (ground-truth, prediction)-pair match each other. For example,\r\n if the elements are boxes, this matrix may contain box intersection-over-union\r\n overlap values.\r\n\r\n The matcher returns (a) a vector of length N containing the index of the\r\n ground-truth element m in [0, M) that matches to prediction n in [0, N).\r\n (b) a vector of length N containing the labels for each prediction.\r\n \"\"\"\r\n\r\n def __init__(\r\n self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False\r\n ):\r\n \"\"\"\r\n Args:\r\n thresholds (list): a list of thresholds used to stratify predictions\r\n into levels.\r\n labels (list): a list of values to label predictions belonging at\r\n each level. 
A label can be one of {-1, 0, 1} signifying\r\n {ignore, negative class, positive class}, respectively.\r\n allow_low_quality_matches (bool): if True, produce additional matches\r\n for predictions with maximum match quality lower than high_threshold.\r\n See set_low_quality_matches_ for more details.\r\n\r\n For example,\r\n thresholds = [0.3, 0.5]\r\n labels = [0, -1, 1]\r\n All predictions with iou < 0.3 will be marked with 0 and\r\n thus will be considered as false positives while training.\r\n All predictions with 0.3 <= iou < 0.5 will be marked with -1 and\r\n thus will be ignored.\r\n All predictions with 0.5 <= iou will be marked with 1 and\r\n thus will be considered as true positives.\r\n \"\"\"\r\n # Add -inf and +inf to first and last position in thresholds\r\n thresholds = thresholds[:]\r\n assert thresholds[0] > 0\r\n thresholds.insert(0, -float(\"inf\"))\r\n thresholds.append(float(\"inf\"))\r\n # Currently torchscript does not support all + generator\r\n assert all([low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])])\r\n assert all([l in [-1, 0, 1] for l in labels])\r\n assert len(labels) == len(thresholds) - 1\r\n self.thresholds = thresholds\r\n self.labels = labels\r\n self.allow_low_quality_matches = allow_low_quality_matches\r\n\r\n def __call__(self, match_quality_matrix):\r\n \"\"\"\r\n Args:\r\n match_quality_matrix (Tensor[float]): an MxN tensor, containing the\r\n pairwise quality between M ground-truth elements and N predicted\r\n elements. All elements must be >= 0 (due to the us of `torch.nonzero`\r\n for selecting indices in :meth:`set_low_quality_matches_`).\r\n\r\n Returns:\r\n matches (Tensor[int64]): a vector of length N, where matches[i] is a matched\r\n ground-truth index in [0, M)\r\n match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates\r\n whether a prediction is a true or false positive or ignored\r\n \"\"\"\r\n assert match_quality_matrix.dim() == 2\r\n if match_quality_matrix.numel() == 0:\r\n default_matches = match_quality_matrix.new_full(\r\n (match_quality_matrix.size(1),), 0, dtype=torch.int64\r\n )\r\n # When no gt boxes exist, we define IOU = 0 and therefore set labels\r\n # to `self.labels[0]`, which usually defaults to background class 0\r\n # To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds\r\n default_match_labels = match_quality_matrix.new_full(\r\n (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8\r\n )\r\n return default_matches, default_match_labels\r\n\r\n assert torch.all(match_quality_matrix >= 0)\r\n\r\n # match_quality_matrix is M (gt) x N (predicted)\r\n # Max over gt elements (dim 0) to find best gt candidate for each prediction\r\n matched_vals, matches = match_quality_matrix.max(dim=0)\r\n\r\n match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)\r\n\r\n for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):\r\n low_high = (matched_vals >= low) & (matched_vals < high)\r\n match_labels[low_high] = l\r\n\r\n if self.allow_low_quality_matches:\r\n self.set_low_quality_matches_(match_labels, match_quality_matrix)\r\n\r\n return matches, match_labels\r\n\r\n def set_low_quality_matches_(self, match_labels, match_quality_matrix):\r\n \"\"\"\r\n Produce additional matches for predictions that have only low-quality matches.\r\n Specifically, for each ground-truth G find the set of predictions that have\r\n maximum overlap with it (including ties); for each prediction in that set, if\r\n it is 
unmatched, then match it to the ground-truth G.\r\n\r\n This function implements the RPN assignment case (i) in Sec. 3.1.2 of\r\n :paper:`Faster R-CNN`.\r\n \"\"\"\r\n # For each gt, find the prediction with which it has highest quality\r\n highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)\r\n # Find the highest quality match available, even if it is low, including ties.\r\n # Note that the matches qualities must be positive due to the use of\r\n # `torch.nonzero`.\r\n _, pred_inds_with_highest_quality = nonzero_tuple(\r\n match_quality_matrix == highest_quality_foreach_gt[:, None]\r\n )\r\n # If an anchor was labeled positive only due to a low-quality match\r\n # with gt_A, but it has larger overlap with gt_B, it's matched index will still be gt_B.\r\n # This follows the implementation in Detectron, and is found to have no significant impact.\r\n match_labels[pred_inds_with_highest_quality] = 1\r" }, { "identifier": "META_ARCH_REGISTRY", "path": "annotator/oneformer/detectron2/modeling/meta_arch/build.py", "snippet": "META_ARCH_REGISTRY = Registry(\"META_ARCH\") # noqa F401 isort:skip\r" }, { "identifier": "DenseDetector", "path": "annotator/oneformer/detectron2/modeling/meta_arch/dense_detector.py", "snippet": "class DenseDetector(nn.Module):\r\n \"\"\"\r\n Base class for dense detector. We define a dense detector as a fully-convolutional model that\r\n makes per-pixel (i.e. dense) predictions.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n backbone: Backbone,\r\n head: nn.Module,\r\n head_in_features: Optional[List[str]] = None,\r\n *,\r\n pixel_mean,\r\n pixel_std,\r\n ):\r\n \"\"\"\r\n Args:\r\n backbone: backbone module\r\n head: head module\r\n head_in_features: backbone features to use in head. Default to all backbone features.\r\n pixel_mean (Tuple[float]):\r\n Values to be used for image normalization (BGR order).\r\n To train on images of different number of channels, set different mean & std.\r\n Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]\r\n pixel_std (Tuple[float]):\r\n When using pre-trained models in Detectron1 or any MSRA models,\r\n std has been absorbed into its conv1 weights, so the std needs to be set 1.\r\n Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)\r\n \"\"\"\r\n super().__init__()\r\n\r\n self.backbone = backbone\r\n self.head = head\r\n if head_in_features is None:\r\n shapes = self.backbone.output_shape()\r\n self.head_in_features = sorted(shapes.keys(), key=lambda x: shapes[x].stride)\r\n else:\r\n self.head_in_features = head_in_features\r\n self.register_buffer(\"pixel_mean\", torch.tensor(pixel_mean).view(-1, 1, 1), False)\r\n self.register_buffer(\"pixel_std\", torch.tensor(pixel_std).view(-1, 1, 1), False)\r\n\r\n @property\r\n def device(self):\r\n return self.pixel_mean.device\r\n\r\n def _move_to_current_device(self, x):\r\n return move_device_like(x, self.pixel_mean)\r\n\r\n def forward(self, batched_inputs: List[Dict[str, Tensor]]):\r\n \"\"\"\r\n Args:\r\n batched_inputs: a list, batched outputs of :class:`DatasetMapper` .\r\n Each item in the list contains the inputs for one image.\r\n For now, each item in the list is a dict that contains:\r\n\r\n * image: Tensor, image in (C, H, W) format.\r\n * instances: Instances\r\n\r\n Other information that's included in the original dicts, such as:\r\n\r\n * \"height\", \"width\" (int): the output resolution of the model, used in inference.\r\n See :meth:`postprocess` for details.\r\n\r\n Returns:\r\n In training, dict[str, Tensor]: 
mapping from a named loss to a tensor storing the\r\n loss. Used during training only. In inference, the standard output format, described\r\n in :doc:`/tutorials/models`.\r\n \"\"\"\r\n images = self.preprocess_image(batched_inputs)\r\n features = self.backbone(images.tensor)\r\n features = [features[f] for f in self.head_in_features]\r\n predictions = self.head(features)\r\n\r\n if self.training:\r\n assert not torch.jit.is_scripting(), \"Not supported\"\r\n assert \"instances\" in batched_inputs[0], \"Instance annotations are missing in training!\"\r\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\r\n return self.forward_training(images, features, predictions, gt_instances)\r\n else:\r\n results = self.forward_inference(images, features, predictions)\r\n if torch.jit.is_scripting():\r\n return results\r\n\r\n processed_results = []\r\n for results_per_image, input_per_image, image_size in zip(\r\n results, batched_inputs, images.image_sizes\r\n ):\r\n height = input_per_image.get(\"height\", image_size[0])\r\n width = input_per_image.get(\"width\", image_size[1])\r\n r = detector_postprocess(results_per_image, height, width)\r\n processed_results.append({\"instances\": r})\r\n return processed_results\r\n\r\n def forward_training(self, images, features, predictions, gt_instances):\r\n raise NotImplementedError()\r\n\r\n def preprocess_image(self, batched_inputs: List[Dict[str, Tensor]]):\r\n \"\"\"\r\n Normalize, pad and batch the input images.\r\n \"\"\"\r\n images = [self._move_to_current_device(x[\"image\"]) for x in batched_inputs]\r\n images = [(x - self.pixel_mean) / self.pixel_std for x in images]\r\n images = ImageList.from_tensors(\r\n images,\r\n self.backbone.size_divisibility,\r\n padding_constraints=self.backbone.padding_constraints,\r\n )\r\n return images\r\n\r\n def _transpose_dense_predictions(\r\n self, predictions: List[List[Tensor]], dims_per_anchor: List[int]\r\n ) -> List[List[Tensor]]:\r\n \"\"\"\r\n Transpose the dense per-level predictions.\r\n\r\n Args:\r\n predictions: a list of outputs, each is a list of per-level\r\n predictions with shape (N, Ai x K, Hi, Wi), where N is the\r\n number of images, Ai is the number of anchors per location on\r\n level i, K is the dimension of predictions per anchor.\r\n dims_per_anchor: the value of K for each predictions. e.g. 4 for\r\n box prediction, #classes for classification prediction.\r\n\r\n Returns:\r\n List[List[Tensor]]: each prediction is transposed to (N, Hi x Wi x Ai, K).\r\n \"\"\"\r\n assert len(predictions) == len(dims_per_anchor)\r\n res: List[List[Tensor]] = []\r\n for pred, dim_per_anchor in zip(predictions, dims_per_anchor):\r\n pred = [permute_to_N_HWA_K(x, dim_per_anchor) for x in pred]\r\n res.append(pred)\r\n return res\r\n\r\n def _ema_update(self, name: str, value: float, initial_value: float, momentum: float = 0.9):\r\n \"\"\"\r\n Apply EMA update to `self.name` using `value`.\r\n\r\n This is mainly used for loss normalizer. In Detectron1, loss is normalized by number\r\n of foreground samples in the batch. When batch size is 1 per GPU, #foreground has a\r\n large variance and using it lead to lower performance. 
Therefore we maintain an EMA of\r\n #foreground to stabilize the normalizer.\r\n\r\n Args:\r\n name: name of the normalizer\r\n value: the new value to update\r\n initial_value: the initial value to start with\r\n momentum: momentum of EMA\r\n\r\n Returns:\r\n float: the updated EMA value\r\n \"\"\"\r\n if hasattr(self, name):\r\n old = getattr(self, name)\r\n else:\r\n old = initial_value\r\n new = old * momentum + value * (1 - momentum)\r\n setattr(self, name, new)\r\n return new\r\n\r\n def _decode_per_level_predictions(\r\n self,\r\n anchors: Boxes,\r\n pred_scores: Tensor,\r\n pred_deltas: Tensor,\r\n score_thresh: float,\r\n topk_candidates: int,\r\n image_size: Tuple[int, int],\r\n ) -> Instances:\r\n \"\"\"\r\n Decode boxes and classification predictions of one featuer level, by\r\n the following steps:\r\n 1. filter the predictions based on score threshold and top K scores.\r\n 2. transform the box regression outputs\r\n 3. return the predicted scores, classes and boxes\r\n\r\n Args:\r\n anchors: Boxes, anchor for this feature level\r\n pred_scores: HxWxA,K\r\n pred_deltas: HxWxA,4\r\n\r\n Returns:\r\n Instances: with field \"scores\", \"pred_boxes\", \"pred_classes\".\r\n \"\"\"\r\n # Apply two filtering to make NMS faster.\r\n # 1. Keep boxes with confidence score higher than threshold\r\n keep_idxs = pred_scores > score_thresh\r\n pred_scores = pred_scores[keep_idxs]\r\n topk_idxs = torch.nonzero(keep_idxs) # Kx2\r\n\r\n # 2. Keep top k top scoring boxes only\r\n topk_idxs_size = topk_idxs.shape[0]\r\n if isinstance(topk_idxs_size, Tensor):\r\n # It's a tensor in tracing\r\n num_topk = torch.clamp(topk_idxs_size, max=topk_candidates)\r\n else:\r\n num_topk = min(topk_idxs_size, topk_candidates)\r\n pred_scores, idxs = pred_scores.topk(num_topk)\r\n topk_idxs = topk_idxs[idxs]\r\n\r\n anchor_idxs, classes_idxs = topk_idxs.unbind(dim=1)\r\n\r\n pred_boxes = self.box2box_transform.apply_deltas(\r\n pred_deltas[anchor_idxs], anchors.tensor[anchor_idxs]\r\n )\r\n return Instances(\r\n image_size, pred_boxes=Boxes(pred_boxes), scores=pred_scores, pred_classes=classes_idxs\r\n )\r\n\r\n def _decode_multi_level_predictions(\r\n self,\r\n anchors: List[Boxes],\r\n pred_scores: List[Tensor],\r\n pred_deltas: List[Tensor],\r\n score_thresh: float,\r\n topk_candidates: int,\r\n image_size: Tuple[int, int],\r\n ) -> Instances:\r\n \"\"\"\r\n Run `_decode_per_level_predictions` for all feature levels and concat the results.\r\n \"\"\"\r\n predictions = [\r\n self._decode_per_level_predictions(\r\n anchors_i,\r\n box_cls_i,\r\n box_reg_i,\r\n self.test_score_thresh,\r\n self.test_topk_candidates,\r\n image_size,\r\n )\r\n # Iterate over every feature level\r\n for box_cls_i, box_reg_i, anchors_i in zip(pred_scores, pred_deltas, anchors)\r\n ]\r\n return predictions[0].cat(predictions) # 'Instances.cat' is not scriptale but this is\r\n\r\n def visualize_training(self, batched_inputs, results):\r\n \"\"\"\r\n A function used to visualize ground truth images and final network predictions.\r\n It shows ground truth bounding boxes on the original image and up to 20\r\n predicted object bounding boxes on the original image.\r\n\r\n Args:\r\n batched_inputs (list): a list that contains input to the model.\r\n results (List[Instances]): a list of #images elements returned by forward_inference().\r\n \"\"\"\r\n from annotator.oneformer.detectron2.utils.visualizer import Visualizer\r\n\r\n assert len(batched_inputs) == len(\r\n results\r\n ), \"Cannot visualize inputs and results of different 
sizes\"\r\n storage = get_event_storage()\r\n max_boxes = 20\r\n\r\n image_index = 0 # only visualize a single image\r\n img = batched_inputs[image_index][\"image\"]\r\n img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)\r\n v_gt = Visualizer(img, None)\r\n v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index][\"instances\"].gt_boxes)\r\n anno_img = v_gt.get_image()\r\n processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1])\r\n predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()\r\n\r\n v_pred = Visualizer(img, None)\r\n v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes])\r\n prop_img = v_pred.get_image()\r\n vis_img = np.vstack((anno_img, prop_img))\r\n vis_img = vis_img.transpose(2, 0, 1)\r\n vis_name = f\"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results\"\r\n storage.put_image(vis_name, vis_img)\r" }, { "identifier": "permute_to_N_HWA_K", "path": "annotator/oneformer/detectron2/modeling/meta_arch/dense_detector.py", "snippet": "def permute_to_N_HWA_K(tensor, K: int):\r\n \"\"\"\r\n Transpose/reshape a tensor from (N, (Ai x K), H, W) to (N, (HxWxAi), K)\r\n \"\"\"\r\n assert tensor.dim() == 4, tensor.shape\r\n N, _, H, W = tensor.shape\r\n tensor = tensor.view(N, -1, K, H, W)\r\n tensor = tensor.permute(0, 3, 4, 1, 2)\r\n tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K)\r\n return tensor\r" } ]
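A standalone illustration of the permute_to_N_HWA_K helper listed in the context snippets above. The helper is re-stated here (lightly adapted, with shape comments) so the example runs with PyTorch alone, without the annotator.oneformer.detectron2 package; the feature-map and class sizes are made up.

import torch

def permute_to_N_HWA_K(tensor, K: int):
    # Reshape (N, A*K, H, W) predictions into (N, H*W*A, K), as in the snippet above.
    assert tensor.dim() == 4, tensor.shape
    N, _, H, W = tensor.shape
    tensor = tensor.view(N, -1, K, H, W)    # (N, A, K, H, W)
    tensor = tensor.permute(0, 3, 4, 1, 2)  # (N, H, W, A, K)
    return tensor.reshape(N, -1, K)         # (N, H*W*A, K)

# Example: 2 images, 9 anchors per location, 80 classes, a 13x17 feature map.
logits = torch.randn(2, 9 * 80, 13, 17)
flat = permute_to_N_HWA_K(logits, K=80)
print(flat.shape)  # torch.Size([2, 1989, 80]), since 13 * 17 * 9 = 1989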
import logging import math import torch from typing import List, Tuple from fvcore.nn import sigmoid_focal_loss_jit from torch import Tensor, nn from torch.nn import functional as F from annotator.oneformer.detectron2.config import configurable from annotator.oneformer.detectron2.layers import CycleBatchNormList, ShapeSpec, batched_nms, cat, get_norm from annotator.oneformer.detectron2.structures import Boxes, ImageList, Instances, pairwise_iou from annotator.oneformer.detectron2.utils.events import get_event_storage from ..anchor_generator import build_anchor_generator from ..backbone import Backbone, build_backbone from ..box_regression import Box2BoxTransform, _dense_box_regression_loss from ..matcher import Matcher from .build import META_ARCH_REGISTRY from .dense_detector import DenseDetector, permute_to_N_HWA_K # noqa
17662
anchors = Boxes.cat(anchors) # Rx4 gt_labels = [] matched_gt_boxes = [] for gt_per_image in gt_instances: match_quality_matrix = pairwise_iou(gt_per_image.gt_boxes, anchors) matched_idxs, anchor_labels = self.anchor_matcher(match_quality_matrix) del match_quality_matrix if len(gt_per_image) > 0: matched_gt_boxes_i = gt_per_image.gt_boxes.tensor[matched_idxs] gt_labels_i = gt_per_image.gt_classes[matched_idxs] # Anchors with label 0 are treated as background. gt_labels_i[anchor_labels == 0] = self.num_classes # Anchors with label -1 are ignored. gt_labels_i[anchor_labels == -1] = -1 else: matched_gt_boxes_i = torch.zeros_like(anchors.tensor) gt_labels_i = torch.zeros_like(matched_idxs) + self.num_classes gt_labels.append(gt_labels_i) matched_gt_boxes.append(matched_gt_boxes_i) return gt_labels, matched_gt_boxes def forward_inference( self, images: ImageList, features: List[Tensor], predictions: List[List[Tensor]] ): pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) results: List[Instances] = [] for img_idx, image_size in enumerate(images.image_sizes): scores_per_image = [x[img_idx].sigmoid_() for x in pred_logits] deltas_per_image = [x[img_idx] for x in pred_anchor_deltas] results_per_image = self.inference_single_image( anchors, scores_per_image, deltas_per_image, image_size ) results.append(results_per_image) return results def inference_single_image( self, anchors: List[Boxes], box_cls: List[Tensor], box_delta: List[Tensor], image_size: Tuple[int, int], ): """ Single-image inference. Return bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS). Arguments: anchors (list[Boxes]): list of #feature levels. Each entry contains a Boxes object, which contains all the anchors in that feature level. box_cls (list[Tensor]): list of #feature levels. Each entry contains tensor of size (H x W x A, K) box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4. image_size (tuple(H, W)): a tuple of the image height and width. Returns: Same as `inference`, but for only one image. """ pred = self._decode_multi_level_predictions( anchors, box_cls, box_delta, self.test_score_thresh, self.test_topk_candidates, image_size, ) keep = batched_nms( # per-class NMS pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh ) return pred[keep[: self.max_detections_per_image]] class RetinaNetHead(nn.Module): """ The head used in RetinaNet for object classification and box regression. It has two subnets for the two tasks, with a common structure but separate parameters. """ @configurable def __init__( self, *, input_shape: List[ShapeSpec], num_classes, num_anchors, conv_dims: List[int], norm="", prior_prob=0.01, ): """ NOTE: this interface is experimental. Args: input_shape (List[ShapeSpec]): input shape num_classes (int): number of classes. Used to label background proposals. num_anchors (int): number of generated anchors conv_dims (List[int]): dimensions for each convolution layer norm (str or callable): Normalization for conv layers except for the two output layers. See :func:`detectron2.layers.get_norm` for supported types. prior_prob (float): Prior weight for computing bias """ super().__init__() self._num_features = len(input_shape) if norm == "BN" or norm == "SyncBN": logger.info( f"Using domain-specific {norm} in RetinaNetHead with len={self._num_features}." ) bn_class = nn.BatchNorm2d if norm == "BN" else nn.SyncBatchNorm def norm(c):
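The cropped_code above ends inside RetinaNetHead, but its label_anchors block shows the anchor-labeling convention: the Matcher emits {-1, 0, 1} per anchor, background anchors are mapped to the extra class index num_classes, and ignored anchors to -1. A minimal sketch replaying that convention with invented numbers (the class ids and match results are assumptions for illustration only):

import torch

num_classes = 80
matched_idxs = torch.tensor([0, 1, 1, 0])      # best-matching gt box index per anchor (made up)
anchor_labels = torch.tensor([1, 0, -1, 1])    # Matcher output: 1 = foreground, 0 = background, -1 = ignore
gt_classes = torch.tensor([17, 3])             # class id of each ground-truth box (made up)

gt_labels_i = gt_classes[matched_idxs]         # tentative per-anchor class: tensor([17, 3, 3, 17])
gt_labels_i[anchor_labels == 0] = num_classes  # background anchors get the extra class index K
gt_labels_i[anchor_labels == -1] = -1          # ignored anchors are excluded from the loss
print(gt_labels_i)                             # tensor([17, 80, -1, 17])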
# Copyright (c) Facebook, Inc. and its affiliates. __all__ = ["RetinaNet"] logger = logging.getLogger(__name__) @META_ARCH_REGISTRY.register() class RetinaNet(DenseDetector): """ Implement RetinaNet in :paper:`RetinaNet`. """ @configurable def __init__( self, *, backbone: Backbone, head: nn.Module, head_in_features, anchor_generator, box2box_transform, anchor_matcher, num_classes, focal_loss_alpha=0.25, focal_loss_gamma=2.0, smooth_l1_beta=0.0, box_reg_loss_type="smooth_l1", test_score_thresh=0.05, test_topk_candidates=1000, test_nms_thresh=0.5, max_detections_per_image=100, pixel_mean, pixel_std, vis_period=0, input_format="BGR", ): """ NOTE: this interface is experimental. Args: backbone: a backbone module, must follow detectron2's backbone interface head (nn.Module): a module that predicts logits and regression deltas for each level from a list of per-level features head_in_features (Tuple[str]): Names of the input feature maps to be used in head anchor_generator (nn.Module): a module that creates anchors from a list of features. Usually an instance of :class:`AnchorGenerator` box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to instance boxes anchor_matcher (Matcher): label the anchors by matching them with ground truth. num_classes (int): number of classes. Used to label background proposals. # Loss parameters: focal_loss_alpha (float): focal_loss_alpha focal_loss_gamma (float): focal_loss_gamma smooth_l1_beta (float): smooth_l1_beta box_reg_loss_type (str): Options are "smooth_l1", "giou", "diou", "ciou" # Inference parameters: test_score_thresh (float): Inference cls score threshold, only anchors with score > INFERENCE_TH are considered for inference (to improve speed) test_topk_candidates (int): Select topk candidates before NMS test_nms_thresh (float): Overlap threshold used for non-maximum suppression (suppress boxes with IoU >= this threshold) max_detections_per_image (int): Maximum number of detections to return per image during inference (100 is based on the limit established for the COCO dataset). pixel_mean, pixel_std: see :class:`DenseDetector`. 
""" super().__init__( backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std ) self.num_classes = num_classes # Anchors self.anchor_generator = anchor_generator self.box2box_transform = box2box_transform self.anchor_matcher = anchor_matcher # Loss parameters: self.focal_loss_alpha = focal_loss_alpha self.focal_loss_gamma = focal_loss_gamma self.smooth_l1_beta = smooth_l1_beta self.box_reg_loss_type = box_reg_loss_type # Inference parameters: self.test_score_thresh = test_score_thresh self.test_topk_candidates = test_topk_candidates self.test_nms_thresh = test_nms_thresh self.max_detections_per_image = max_detections_per_image # Vis parameters self.vis_period = vis_period self.input_format = input_format @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) backbone_shape = backbone.output_shape() feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES] head = RetinaNetHead(cfg, feature_shapes) anchor_generator = build_anchor_generator(cfg, feature_shapes) return { "backbone": backbone, "head": head, "anchor_generator": anchor_generator, "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS), "anchor_matcher": Matcher( cfg.MODEL.RETINANET.IOU_THRESHOLDS, cfg.MODEL.RETINANET.IOU_LABELS, allow_low_quality_matches=True, ), "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES, # Loss parameters: "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA, "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA, "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA, "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE, # Inference parameters: "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST, "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST, "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST, "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, # Vis parameters "vis_period": cfg.VIS_PERIOD, "input_format": cfg.INPUT.FORMAT, } def forward_training(self, images, features, predictions, gt_instances): # Transpose the Hi*Wi*A dimension to the middle: pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes) def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes): """ Args: anchors (list[Boxes]): a list of #feature level Boxes gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`. Their shapes are (N, R) and (N, R, 4), respectively, where R is the total number of anchors across levels, i.e. sum(Hi x Wi x Ai) pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4). Where K is the number of classes used in `pred_logits`. Returns: dict[str, Tensor]: mapping from a named loss to a scalar tensor storing the loss. Used during training only. 
The dict keys are: "loss_cls" and "loss_box_reg" """ num_images = len(gt_labels) gt_labels = torch.stack(gt_labels) # (N, R) valid_mask = gt_labels >= 0 pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) num_pos_anchors = pos_mask.sum().item() get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 100) # classification and regression loss gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[ :, :-1 ] # no loss for the last (background) class loss_cls = sigmoid_focal_loss_jit( cat(pred_logits, dim=1)[valid_mask], gt_labels_target.to(pred_logits[0].dtype), alpha=self.focal_loss_alpha, gamma=self.focal_loss_gamma, reduction="sum", ) loss_box_reg = _dense_box_regression_loss( anchors, self.box2box_transform, pred_anchor_deltas, gt_boxes, pos_mask, box_reg_loss_type=self.box_reg_loss_type, smooth_l1_beta=self.smooth_l1_beta, ) return { "loss_cls": loss_cls / normalizer, "loss_box_reg": loss_box_reg / normalizer, } @torch.no_grad() def label_anchors(self, anchors, gt_instances): """ Args: anchors (list[Boxes]): A list of #feature level Boxes. The Boxes contains anchors of this image on the specific feature level. gt_instances (list[Instances]): a list of N `Instances`s. The i-th `Instances` contains the ground-truth per-instance annotations for the i-th input image. Returns: list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is the total number of anchors across all feature maps (sum(Hi * Wi * A)). Label values are in {-1, 0, ..., K}, with -1 means ignore, and K means background. list[Tensor]: i-th element is a Rx4 tensor, where R is the total number of anchors across feature maps. The values are the matched gt boxes for each anchor. Values are undefined for those anchors not labeled as foreground. """ anchors = Boxes.cat(anchors) # Rx4 gt_labels = [] matched_gt_boxes = [] for gt_per_image in gt_instances: match_quality_matrix = pairwise_iou(gt_per_image.gt_boxes, anchors) matched_idxs, anchor_labels = self.anchor_matcher(match_quality_matrix) del match_quality_matrix if len(gt_per_image) > 0: matched_gt_boxes_i = gt_per_image.gt_boxes.tensor[matched_idxs] gt_labels_i = gt_per_image.gt_classes[matched_idxs] # Anchors with label 0 are treated as background. gt_labels_i[anchor_labels == 0] = self.num_classes # Anchors with label -1 are ignored. gt_labels_i[anchor_labels == -1] = -1 else: matched_gt_boxes_i = torch.zeros_like(anchors.tensor) gt_labels_i = torch.zeros_like(matched_idxs) + self.num_classes gt_labels.append(gt_labels_i) matched_gt_boxes.append(matched_gt_boxes_i) return gt_labels, matched_gt_boxes def forward_inference( self, images: ImageList, features: List[Tensor], predictions: List[List[Tensor]] ): pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) results: List[Instances] = [] for img_idx, image_size in enumerate(images.image_sizes): scores_per_image = [x[img_idx].sigmoid_() for x in pred_logits] deltas_per_image = [x[img_idx] for x in pred_anchor_deltas] results_per_image = self.inference_single_image( anchors, scores_per_image, deltas_per_image, image_size ) results.append(results_per_image) return results def inference_single_image( self, anchors: List[Boxes], box_cls: List[Tensor], box_delta: List[Tensor], image_size: Tuple[int, int], ): """ Single-image inference. 
Return bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS). Arguments: anchors (list[Boxes]): list of #feature levels. Each entry contains a Boxes object, which contains all the anchors in that feature level. box_cls (list[Tensor]): list of #feature levels. Each entry contains tensor of size (H x W x A, K) box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4. image_size (tuple(H, W)): a tuple of the image height and width. Returns: Same as `inference`, but for only one image. """ pred = self._decode_multi_level_predictions( anchors, box_cls, box_delta, self.test_score_thresh, self.test_topk_candidates, image_size, ) keep = batched_nms( # per-class NMS pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh ) return pred[keep[: self.max_detections_per_image]] class RetinaNetHead(nn.Module): """ The head used in RetinaNet for object classification and box regression. It has two subnets for the two tasks, with a common structure but separate parameters. """ @configurable def __init__( self, *, input_shape: List[ShapeSpec], num_classes, num_anchors, conv_dims: List[int], norm="", prior_prob=0.01, ): """ NOTE: this interface is experimental. Args: input_shape (List[ShapeSpec]): input shape num_classes (int): number of classes. Used to label background proposals. num_anchors (int): number of generated anchors conv_dims (List[int]): dimensions for each convolution layer norm (str or callable): Normalization for conv layers except for the two output layers. See :func:`detectron2.layers.get_norm` for supported types. prior_prob (float): Prior weight for computing bias """ super().__init__() self._num_features = len(input_shape) if norm == "BN" or norm == "SyncBN": logger.info( f"Using domain-specific {norm} in RetinaNetHead with len={self._num_features}." ) bn_class = nn.BatchNorm2d if norm == "BN" else nn.SyncBatchNorm def norm(c):
return CycleBatchNormList(
2
2023-12-05 02:51:53+00:00
24k
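The RetinaNet record that ends here builds its classification targets by one-hot encoding the matched anchor labels over K + 1 classes and dropping the last column, so background anchors contribute an all-zero target row to the sigmoid focal loss, while only the positive mask feeds the box-regression loss. A minimal, self-contained sketch of that target construction, using a made-up K = 3 and five toy anchor labels chosen only for illustration:

import torch
import torch.nn.functional as F

K = 3                                              # number of foreground classes (assumed for the toy example)
# per-anchor labels as produced by label_anchors(): -1 = ignore, K = background, 0..K-1 = foreground
gt_labels = torch.tensor([2, K, -1, 0, K])

valid_mask = gt_labels >= 0                        # ignored anchors are excluded from the cls loss
pos_mask = (gt_labels >= 0) & (gt_labels != K)     # only foreground anchors enter the box loss

# one-hot over K + 1 classes, then drop the background column -> background rows become all zeros
gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=K + 1)[:, :-1]
print(gt_labels_target)
# tensor([[0, 0, 1],
#         [0, 0, 0],
#         [1, 0, 0],
#         [0, 0, 0]])
print(pos_mask.sum().item())                       # 2 positive anchors; the record's EMA normalizer tracks this count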
DiffusionLight/DiffusionLight
relighting/inpainter.py
[ { "identifier": "CustomStableDiffusionControlNetInpaintPipeline", "path": "relighting/pipeline.py", "snippet": "class CustomStableDiffusionControlNetInpaintPipeline(StableDiffusionControlNetInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: PipelineImageInput = None,\n mask_image: PipelineImageInput = None,\n control_image: PipelineImageInput = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n strength: float = 1.0,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n controlnet_conditioning_scale: Union[float, List[float]] = 0.5,\n guess_mode: bool = False,\n control_guidance_start: Union[float, List[float]] = 0.0,\n control_guidance_end: Union[float, List[float]] = 1.0,\n newx: int = 0,\n newy: int = 0,\n newr: int = 256,\n current_seed=0,\n use_noise_moving=True,\n ):\n # OVERWRITE METHODS\n self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionControlNetInpaintPipeline)\n self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionControlNetInpaintPipeline)\n\n controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet\n\n # align format for control guidance\n if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):\n control_guidance_start = len(control_guidance_end) * [control_guidance_start]\n elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):\n control_guidance_end = len(control_guidance_start) * [control_guidance_end]\n elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):\n mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1\n control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [\n control_guidance_end\n ]\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(\n prompt,\n control_image,\n height,\n width,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n controlnet_conditioning_scale,\n control_guidance_start,\n control_guidance_end,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):\n controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)\n\n global_pool_conditions = (\n controlnet.config.global_pool_conditions\n if isinstance(controlnet, ControlNetModel)\n else controlnet.nets[0].config.global_pool_conditions\n )\n guess_mode = guess_mode or global_pool_conditions\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n if do_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n\n # 4. Prepare image\n if isinstance(controlnet, ControlNetModel):\n control_image = self.prepare_control_image(\n image=control_image,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n elif isinstance(controlnet, MultiControlNetModel):\n control_images = []\n\n for control_image_ in control_image:\n control_image_ = self.prepare_control_image(\n image=control_image_,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n\n control_images.append(control_image_)\n\n control_image = control_images\n else:\n assert False\n\n # 4. Preprocess mask and image - resizes image and mask w.r.t height and width\n init_image = self.image_processor.preprocess(image, height=height, width=width)\n init_image = init_image.to(dtype=torch.float32)\n\n mask = self.mask_processor.preprocess(mask_image, height=height, width=width)\n\n masked_image = init_image * (mask < 0.5)\n _, _, height, width = init_image.shape\n\n # 5. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(\n num_inference_steps=num_inference_steps, strength=strength, device=device\n )\n # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise\n is_strength_max = strength == 1.0\n\n # 6. 
Prepare latent variables\n num_channels_latents = self.vae.config.latent_channels\n num_channels_unet = self.unet.config.in_channels\n return_image_latents = num_channels_unet == 4\n\n # EDITED HERE\n latents_outputs = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n image=init_image,\n timestep=latent_timestep,\n is_strength_max=is_strength_max,\n return_noise=True,\n return_image_latents=return_image_latents,\n newx=newx,\n newy=newy,\n newr=newr,\n current_seed=current_seed,\n use_noise_moving=use_noise_moving,\n )\n\n if return_image_latents:\n latents, noise, image_latents = latents_outputs\n else:\n latents, noise = latents_outputs\n\n # 7. Prepare mask latent variables\n mask, masked_image_latents = self.prepare_mask_latents(\n mask,\n masked_image,\n batch_size * num_images_per_prompt,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n do_classifier_free_guidance,\n )\n\n # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 7.1 Create tensor stating which controlnets to keep\n controlnet_keep = []\n for i in range(len(timesteps)):\n keeps = [\n 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)\n for s, e in zip(control_guidance_start, control_guidance_end)\n ]\n controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)\n\n # 8. Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # controlnet(s) inference\n if guess_mode and do_classifier_free_guidance:\n # Infer ControlNet only for the conditional batch.\n control_model_input = latents\n control_model_input = self.scheduler.scale_model_input(control_model_input, t)\n controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]\n else:\n control_model_input = latent_model_input\n controlnet_prompt_embeds = prompt_embeds\n\n if isinstance(controlnet_keep[i], list):\n cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]\n else:\n controlnet_cond_scale = controlnet_conditioning_scale\n if isinstance(controlnet_cond_scale, list):\n controlnet_cond_scale = controlnet_cond_scale[0]\n cond_scale = controlnet_cond_scale * controlnet_keep[i]\n\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n control_model_input,\n t,\n encoder_hidden_states=controlnet_prompt_embeds,\n controlnet_cond=control_image,\n conditioning_scale=cond_scale,\n guess_mode=guess_mode,\n return_dict=False,\n )\n\n if guess_mode and do_classifier_free_guidance:\n # Infered ControlNet only for the conditional batch.\n # To apply the output of ControlNet to both the unconditional and conditional batches,\n # add 0 to the unconditional batch to keep it unchanged.\n down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]\n mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])\n\n # predict the noise residual\n if num_channels_unet == 9:\n latent_model_input = 
torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n if num_channels_unet == 4:\n init_latents_proper = image_latents[:1]\n init_mask = mask[:1]\n\n if i < len(timesteps) - 1:\n noise_timestep = timesteps[i + 1]\n init_latents_proper = self.scheduler.add_noise(\n init_latents_proper, noise, torch.tensor([noise_timestep])\n )\n\n latents = (1 - init_mask) * init_latents_proper + init_mask * latents\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n # If we do sequential model offloading, let's offload unet and controlnet\n # manually for max memory savings\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.unet.to(\"cpu\")\n self.controlnet.to(\"cpu\")\n torch.cuda.empty_cache()\n\n if not output_type == \"latent\":\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = latents\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n # Offload all models\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" }, { "identifier": "CustomStableDiffusionInpaintPipeline", "path": "relighting/pipeline_inpaintonly.py", "snippet": "class CustomStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: PipelineImageInput = None,\n mask_image: PipelineImageInput = None,\n masked_image_latents: torch.FloatTensor = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n strength: float = 1.0,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n newx: int = 0,\n newy: int = 0,\n newr: int = 256,\n current_seed=0,\n 
use_noise_moving=True,\n ):\n # OVERWRITE METHODS\n self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionInpaintPipeline)\n self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionInpaintPipeline)\n\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 1. Check inputs\n self.check_inputs(\n prompt,\n height,\n width,\n strength,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n if do_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n\n # 4. set timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(\n num_inference_steps=num_inference_steps, strength=strength, device=device\n )\n # check that number of inference steps is not < 1 - as this doesn't make sense\n if num_inference_steps < 1:\n raise ValueError(\n f\"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline\"\n f\"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.\"\n )\n # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise\n is_strength_max = strength == 1.0\n\n # 5. Preprocess mask and image\n\n init_image = self.image_processor.preprocess(image, height=height, width=width)\n init_image = init_image.to(dtype=torch.float32)\n\n # 6. 
Prepare latent variables\n num_channels_latents = self.vae.config.latent_channels\n num_channels_unet = self.unet.config.in_channels\n return_image_latents = num_channels_unet == 4\n\n latents_outputs = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n image=init_image,\n timestep=latent_timestep,\n is_strength_max=is_strength_max,\n return_noise=True,\n return_image_latents=return_image_latents,\n newx=newx,\n newy=newy,\n newr=newr,\n current_seed=current_seed,\n use_noise_moving=use_noise_moving,\n )\n\n if return_image_latents:\n latents, noise, image_latents = latents_outputs\n else:\n latents, noise = latents_outputs\n\n # 7. Prepare mask latent variables\n mask_condition = self.mask_processor.preprocess(mask_image, height=height, width=width)\n\n if masked_image_latents is None:\n masked_image = init_image * (mask_condition < 0.5)\n else:\n masked_image = masked_image_latents\n\n mask, masked_image_latents = self.prepare_mask_latents(\n mask_condition,\n masked_image,\n batch_size * num_images_per_prompt,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n do_classifier_free_guidance,\n )\n\n # 8. Check that sizes of mask, masked image and latents match\n if num_channels_unet == 9:\n # default case for runwayml/stable-diffusion-inpainting\n num_channels_mask = mask.shape[1]\n num_channels_masked_image = masked_image_latents.shape[1]\n if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:\n raise ValueError(\n f\"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects\"\n f\" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +\"\n f\" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}\"\n f\" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of\"\n \" `pipeline.unet` or your `mask_image` or `image` input.\"\n )\n elif num_channels_unet != 4:\n raise ValueError(\n f\"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}.\"\n )\n\n # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 10. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n if num_channels_unet == 9:\n latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n if num_channels_unet == 4:\n init_latents_proper = image_latents[:1]\n init_mask = mask[:1]\n\n if i < len(timesteps) - 1:\n noise_timestep = timesteps[i + 1]\n init_latents_proper = self.scheduler.add_noise(\n init_latents_proper, noise, torch.tensor([noise_timestep])\n )\n\n latents = (1 - init_mask) * init_latents_proper + init_mask * latents\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if not output_type == \"latent\":\n condition_kwargs = {}\n if isinstance(self.vae, AsymmetricAutoencoderKL):\n init_image = init_image.to(device=device, dtype=masked_image_latents.dtype)\n init_image_condition = init_image.clone()\n init_image = self._encode_vae_image(init_image, generator=generator)\n mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype)\n condition_kwargs = {\"image\": init_image_condition, \"mask\": mask_condition}\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, **condition_kwargs)[0]\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = latents\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n # Offload all models\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" }, { "identifier": "CustomStableDiffusionXLInpaintPipeline", "path": "relighting/pipeline_inpaintonly.py", "snippet": "class CustomStableDiffusionXLInpaintPipeline(StableDiffusionXLInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n prompt_2: Optional[Union[str, List[str]]] = None,\n image: PipelineImageInput = None,\n mask_image: PipelineImageInput = None,\n masked_image_latents: torch.FloatTensor = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n strength: float = 0.9999,\n 
num_inference_steps: int = 50,\n denoising_start: Optional[float] = None,\n denoising_end: Optional[float] = None,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n negative_prompt_2: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n guidance_rescale: float = 0.0,\n original_size: Tuple[int, int] = None,\n crops_coords_top_left: Tuple[int, int] = (0, 0),\n target_size: Tuple[int, int] = None,\n negative_original_size: Optional[Tuple[int, int]] = None,\n negative_crops_coords_top_left: Tuple[int, int] = (0, 0),\n negative_target_size: Optional[Tuple[int, int]] = None,\n aesthetic_score: float = 6.0,\n negative_aesthetic_score: float = 2.5,\n newx: int = 0,\n newy: int = 0,\n newr: int = 256,\n current_seed=0,\n use_noise_moving=True,\n ):\n # OVERWRITE METHODS\n self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionXLInpaintPipeline)\n self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionXLInpaintPipeline)\n\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 1. Check inputs\n self.check_inputs(\n prompt,\n prompt_2,\n height,\n width,\n strength,\n callback_steps,\n negative_prompt,\n negative_prompt_2,\n prompt_embeds,\n negative_prompt_embeds,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n\n (\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ) = self.encode_prompt(\n prompt=prompt,\n prompt_2=prompt_2,\n device=device,\n num_images_per_prompt=num_images_per_prompt,\n do_classifier_free_guidance=do_classifier_free_guidance,\n negative_prompt=negative_prompt,\n negative_prompt_2=negative_prompt_2,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n pooled_prompt_embeds=pooled_prompt_embeds,\n negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n\n # 4. 
set timesteps\n def denoising_value_valid(dnv):\n return isinstance(denoising_end, float) and 0 < dnv < 1\n\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(\n num_inference_steps, strength, device, denoising_start=denoising_start if denoising_value_valid else None\n )\n # check that number of inference steps is not < 1 - as this doesn't make sense\n if num_inference_steps < 1:\n raise ValueError(\n f\"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline\"\n f\"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.\"\n )\n # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise\n is_strength_max = strength == 1.0\n\n # 5. Preprocess mask and image\n init_image = self.image_processor.preprocess(image, height=height, width=width)\n init_image = init_image.to(dtype=torch.float32)\n\n mask = self.mask_processor.preprocess(mask_image, height=height, width=width)\n\n if masked_image_latents is not None:\n masked_image = masked_image_latents\n elif init_image.shape[1] == 4:\n # if images are in latent space, we can't mask it\n masked_image = None\n else:\n masked_image = init_image * (mask < 0.5)\n\n # 6. Prepare latent variables\n num_channels_latents = self.vae.config.latent_channels\n num_channels_unet = self.unet.config.in_channels\n return_image_latents = num_channels_unet == 4\n\n # add_noise = True if denoising_start is None else False\n latents_outputs = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n image=init_image,\n timestep=latent_timestep,\n is_strength_max=is_strength_max,\n return_noise=True,\n return_image_latents=return_image_latents,\n newx=newx,\n newy=newy,\n newr=newr,\n current_seed=current_seed,\n use_noise_moving=use_noise_moving,\n )\n\n if return_image_latents:\n latents, noise, image_latents = latents_outputs\n else:\n latents, noise = latents_outputs\n\n # 7. Prepare mask latent variables\n mask, masked_image_latents = self.prepare_mask_latents(\n mask,\n masked_image,\n batch_size * num_images_per_prompt,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n do_classifier_free_guidance,\n )\n\n # 8. Check that sizes of mask, masked image and latents match\n if num_channels_unet == 9:\n # default case for runwayml/stable-diffusion-inpainting\n num_channels_mask = mask.shape[1]\n num_channels_masked_image = masked_image_latents.shape[1]\n if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:\n raise ValueError(\n f\"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects\"\n f\" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +\"\n f\" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}\"\n f\" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. 
Please verify the config of\"\n \" `pipeline.unet` or your `mask_image` or `image` input.\"\n )\n elif num_channels_unet != 4:\n raise ValueError(\n f\"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}.\"\n )\n # 8.1 Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n height, width = latents.shape[-2:]\n height = height * self.vae_scale_factor\n width = width * self.vae_scale_factor\n\n original_size = original_size or (height, width)\n target_size = target_size or (height, width)\n\n # 10. Prepare added time ids & embeddings\n if negative_original_size is None:\n negative_original_size = original_size\n if negative_target_size is None:\n negative_target_size = target_size\n\n add_text_embeds = pooled_prompt_embeds\n add_time_ids, add_neg_time_ids = self._get_add_time_ids(\n original_size,\n crops_coords_top_left,\n target_size,\n aesthetic_score,\n negative_aesthetic_score,\n negative_original_size,\n negative_crops_coords_top_left,\n negative_target_size,\n dtype=prompt_embeds.dtype,\n )\n add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)\n\n if do_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)\n add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)\n add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)\n add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)\n\n prompt_embeds = prompt_embeds.to(device)\n add_text_embeds = add_text_embeds.to(device)\n add_time_ids = add_time_ids.to(device)\n\n # 11. Denoising loop\n num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)\n\n if (\n denoising_end is not None\n and denoising_start is not None\n and denoising_value_valid(denoising_end)\n and denoising_value_valid(denoising_start)\n and denoising_start >= denoising_end\n ):\n raise ValueError(\n f\"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: \"\n + f\" {denoising_end} when using type float.\"\n )\n elif denoising_end is not None and denoising_value_valid(denoising_end):\n discrete_timestep_cutoff = int(\n round(\n self.scheduler.config.num_train_timesteps\n - (denoising_end * self.scheduler.config.num_train_timesteps)\n )\n )\n num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))\n timesteps = timesteps[:num_inference_steps]\n\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n if num_channels_unet == 9:\n latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n # predict the noise residual\n added_cond_kwargs = {\"text_embeds\": add_text_embeds, \"time_ids\": add_time_ids}\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n added_cond_kwargs=added_cond_kwargs,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n 
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n if do_classifier_free_guidance and guidance_rescale > 0.0:\n # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf\n noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n if num_channels_unet == 4:\n init_latents_proper = image_latents[:1]\n init_mask = mask[:1]\n\n if i < len(timesteps) - 1:\n noise_timestep = timesteps[i + 1]\n init_latents_proper = self.scheduler.add_noise(\n init_latents_proper, noise, torch.tensor([noise_timestep])\n )\n\n latents = (1 - init_mask) * init_latents_proper + init_mask * latents\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if not output_type == \"latent\":\n # make sure the VAE is in float32 mode, as it overflows in float16\n needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast\n\n if needs_upcasting:\n self.upcast_vae()\n latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)\n\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\n\n # cast back to fp16 if needed\n if needs_upcasting:\n self.vae.to(dtype=torch.float16)\n else:\n return StableDiffusionXLPipelineOutput(images=latents)\n\n # apply watermark if available\n if self.watermark is not None:\n image = self.watermark.apply_watermark(image)\n\n image = self.image_processor.postprocess(image, output_type=output_type)\n\n # Offload all models\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return (image,)\n\n return StableDiffusionXLPipelineOutput(images=image)" }, { "identifier": "SAMPLERS", "path": "relighting/argument.py", "snippet": "SAMPLERS = {\n \"ddim\": DDIMScheduler,\n \"ddpm\": DDPMScheduler,\n \"unipc\": UniPCMultistepScheduler,\n}" }, { "identifier": "VAE_MODELS", "path": "relighting/argument.py", "snippet": "VAE_MODELS = {\n \"sdxl\": \"madebyollin/sdxl-vae-fp16-fix\",\n \"sdxl_fast\": \"madebyollin/sdxl-vae-fp16-fix\",\n}" }, { "identifier": "DEPTH_ESTIMATOR", "path": "relighting/argument.py", "snippet": "DEPTH_ESTIMATOR = \"Intel/dpt-hybrid-midas\"" }, { "identifier": "get_control_signal_type", "path": "relighting/argument.py", "snippet": "def get_control_signal_type(controlnet):\n if \"normal\" in controlnet:\n return \"normal\"\n elif \"depth\" in controlnet:\n return \"depth\"\n else:\n raise NotImplementedError" }, { "identifier": "estimate_scene_depth", "path": "relighting/image_processor.py", "snippet": "def estimate_scene_depth(image, depth_estimator):\n #image = feature_extractor(images=image, return_tensors=\"pt\").pixel_values.to(\"cuda\")\n #with torch.no_grad(), torch.autocast(\"cuda\"):\n # depth_map = depth_estimator(image).predicted_depth\n\n depth_map = depth_estimator(image)['predicted_depth']\n W, H = image.size\n depth_map = torch.nn.functional.interpolate(\n depth_map.unsqueeze(1),\n size=(H, W),\n mode=\"bicubic\",\n align_corners=False,\n )\n depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)\n depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)\n depth_map = (depth_map - depth_min) / (depth_max - 
depth_min)\n image = torch.cat([depth_map] * 3, dim=1)\n\n image = image.permute(0, 2, 3, 1).cpu().numpy()[0]\n image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8))\n return image" }, { "identifier": "estimate_scene_normal", "path": "relighting/image_processor.py", "snippet": "def estimate_scene_normal(image, depth_estimator):\n # can be improve speed do not going back and float between numpy and torch\n normal_image = depth_estimator(image)['predicted_depth'][0]\n\n normal_image = normal_image.numpy()\n\n # upsizing image depth to match input\n hw = np.array(image).shape[:2]\n normal_image = skimage.transform.resize(normal_image, hw, preserve_range=True)\n\n image_depth = normal_image.copy()\n image_depth -= np.min(image_depth)\n image_depth /= np.max(image_depth)\n \n bg_threhold = 0.4\n\n x = cv2.Sobel(normal_image, cv2.CV_32F, 1, 0, ksize=3)\n x[image_depth < bg_threhold] = 0\n\n y = cv2.Sobel(normal_image, cv2.CV_32F, 0, 1, ksize=3)\n y[image_depth < bg_threhold] = 0\n\n z = np.ones_like(x) * np.pi * 2.0\n\n normal_image = np.stack([x, y, z], axis=2)\n normal_image /= np.sum(normal_image ** 2.0, axis=2, keepdims=True) ** 0.5\n\n # rescale back to image size\n return normal_image" }, { "identifier": "merge_normal_map", "path": "relighting/image_processor.py", "snippet": "def merge_normal_map(normal_map, normal_ball, mask_ball, x, y):\n \"\"\"\n Merge a ball to normal map using mask\n @params\n normal_amp (np.array) - normal map of the scene [height, width, 3]\n normal_ball (np.array) - normal map of the ball [ball_height, ball_width, 3]\n mask_ball (np.array) - mask of the ball [ball_height, ball_width]\n x (int) - x position of the ball (top-left)\n y (int) - y position of the ball (top-left)\n @return\n normal_mapthe merge normal map [height, width, 3] \n \"\"\"\n result = normal_map.copy()\n\n mask_ball = mask_ball[..., None]\n ball = (normal_ball * mask_ball) # alpha blending the ball\n unball = (normal_map[y:y+normal_ball.shape[0], x:x+normal_ball.shape[1]] * (1 - mask_ball)) # alpha blending the normal map\n result[y:y+normal_ball.shape[0], x:x+normal_ball.shape[1]] = ball+unball # add them together\n return result" }, { "identifier": "fill_depth_circular", "path": "relighting/image_processor.py", "snippet": "def fill_depth_circular(depth_image, x, y, r):\n depth_image = np.array(depth_image)\n\n for i in range(depth_image.shape[0]):\n for j in range(depth_image.shape[1]):\n xy = (i - x - r//2)**2 + (j - y - r//2)**2\n # if xy <= rr**2:\n # depth_image[j, i, :] = 255\n # depth_image[j, i, :] = int(minv + (maxv - minv) * z)\n if xy <= (r // 2)**2:\n depth_image[j, i, :] = 255\n \n depth_image = Image.fromarray(depth_image)\n return depth_image" }, { "identifier": "get_ideal_normal_ball", "path": "relighting/ball_processor.py", "snippet": "def get_ideal_normal_ball(size, flip_x=True):\n \"\"\"\n Generate normal ball for specific size \n Normal map is x \"left\", y up, z into the screen \n (we flip X to match sobel operator)\n @params\n - size (int) - single value of height and width\n @return:\n - normal_map (np.array) - normal map [size, size, 3]\n - mask (np.array) - mask that make a valid normal map [size,size]\n \"\"\"\n # we flip x to match sobel operator\n x = torch.linspace(1, -1, size)\n y = torch.linspace(1, -1, size)\n x = x.flip(dims=(-1,)) if not flip_x else x\n\n y, x = torch.meshgrid(y, x)\n z = (1 - x**2 - y**2)\n mask = z >= 0\n\n # clean up invalid value outsize the mask\n x = x * mask\n y = y * mask\n z = z * mask\n \n # get real z value\n z = 
torch.sqrt(z)\n \n # clean up normal map value outside mask \n normal_map = torch.cat([x[..., None], y[..., None], z[..., None]], dim=-1)\n normal_map = normal_map.numpy()\n mask = mask.numpy()\n return normal_map, mask" }, { "identifier": "crop_ball", "path": "relighting/ball_processor.py", "snippet": "def crop_ball(image, mask_ball, x, y, size, apply_mask=True, bg_color = (0, 0, 0)):\n if isinstance(image, Image.Image):\n result = np.array(image)\n else:\n result = image.copy()\n \n result = result[y:y+size, x:x+size]\n if apply_mask:\n result[~mask_ball] = bg_color\n return result" }, { "identifier": "CustomStableDiffusionXLControlNetInpaintPipeline", "path": "relighting/pipeline_xl.py", "snippet": "class CustomStableDiffusionXLControlNetInpaintPipeline(StableDiffusionXLControlNetInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n prompt_2: Optional[Union[str, List[str]]] = None,\n image: PipelineImageInput = None,\n mask_image: PipelineImageInput = None,\n control_image: Union[\n PipelineImageInput,\n List[PipelineImageInput],\n ] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n strength: float = 0.9999,\n num_inference_steps: int = 50,\n denoising_start: Optional[float] = None,\n denoising_end: Optional[float] = None,\n guidance_scale: float = 5.0,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n negative_prompt_2: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n controlnet_conditioning_scale: Union[float, List[float]] = 1.0,\n guess_mode: bool = False,\n control_guidance_start: Union[float, List[float]] = 0.0,\n control_guidance_end: Union[float, List[float]] = 1.0,\n guidance_rescale: float = 0.0,\n original_size: Tuple[int, int] = None,\n crops_coords_top_left: Tuple[int, int] = (0, 0),\n target_size: Tuple[int, int] = None,\n aesthetic_score: float = 6.0,\n negative_aesthetic_score: float = 2.5,\n newx: int = 0,\n newy: int = 0,\n newr: int = 256,\n current_seed=0,\n use_noise_moving=True,\n ):\n # OVERWRITE METHODS\n self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionXLControlNetInpaintPipeline)\n self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionXLControlNetInpaintPipeline)\n\n controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet\n\n # align format for control guidance\n if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):\n control_guidance_start = len(control_guidance_end) * [control_guidance_start]\n elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):\n control_guidance_end = len(control_guidance_start) * [control_guidance_end]\n elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):\n mult = 
len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1\n control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [\n control_guidance_end\n ]\n\n # # 0.0 Default height and width to unet\n # height = height or self.unet.config.sample_size * self.vae_scale_factor\n # width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 0.1 align format for control guidance\n if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):\n control_guidance_start = len(control_guidance_end) * [control_guidance_start]\n elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):\n control_guidance_end = len(control_guidance_start) * [control_guidance_end]\n elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):\n mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1\n control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [\n control_guidance_end\n ]\n\n # 1. Check inputs\n self.check_inputs(\n prompt,\n prompt_2,\n control_image,\n strength,\n num_inference_steps,\n callback_steps,\n negative_prompt,\n negative_prompt_2,\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n controlnet_conditioning_scale,\n control_guidance_start,\n control_guidance_end,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):\n controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n\n (\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ) = self.encode_prompt(\n prompt=prompt,\n prompt_2=prompt_2,\n device=device,\n num_images_per_prompt=num_images_per_prompt,\n do_classifier_free_guidance=do_classifier_free_guidance,\n negative_prompt=negative_prompt,\n negative_prompt_2=negative_prompt_2,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n pooled_prompt_embeds=pooled_prompt_embeds,\n negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n\n # 4. 
set timesteps\n def denoising_value_valid(dnv):\n return isinstance(denoising_end, float) and 0 < dnv < 1\n\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(\n num_inference_steps, strength, device, denoising_start=denoising_start if denoising_value_valid else None\n )\n # check that number of inference steps is not < 1 - as this doesn't make sense\n if num_inference_steps < 1:\n raise ValueError(\n f\"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline\"\n f\"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.\"\n )\n # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise\n is_strength_max = strength == 1.0\n\n # 5. Preprocess mask and image - resizes image and mask w.r.t height and width\n # 5.1 Prepare init image\n init_image = self.image_processor.preprocess(image, height=height, width=width)\n init_image = init_image.to(dtype=torch.float32)\n\n # 5.2 Prepare control images\n if isinstance(controlnet, ControlNetModel):\n control_image = self.prepare_control_image(\n image=control_image,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n elif isinstance(controlnet, MultiControlNetModel):\n control_images = []\n\n for control_image_ in control_image:\n control_image_ = self.prepare_control_image(\n image=control_image_,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n\n control_images.append(control_image_)\n\n control_image = control_images\n else:\n raise ValueError(f\"{controlnet.__class__} is not supported.\")\n\n # 5.3 Prepare mask\n mask = self.mask_processor.preprocess(mask_image, height=height, width=width)\n\n masked_image = init_image * (mask < 0.5)\n _, _, height, width = init_image.shape\n\n # 6. Prepare latent variables\n num_channels_latents = self.vae.config.latent_channels\n num_channels_unet = self.unet.config.in_channels\n return_image_latents = num_channels_unet == 4\n\n add_noise = True if denoising_start is None else False\n latents_outputs = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n image=init_image,\n timestep=latent_timestep,\n is_strength_max=is_strength_max,\n return_noise=True,\n return_image_latents=return_image_latents,\n newx=newx,\n newy=newy,\n newr=newr,\n current_seed=current_seed,\n use_noise_moving=use_noise_moving,\n )\n\n if return_image_latents:\n latents, noise, image_latents = latents_outputs\n else:\n latents, noise = latents_outputs\n\n # 7. Prepare mask latent variables\n mask, masked_image_latents = self.prepare_mask_latents(\n mask,\n masked_image,\n batch_size * num_images_per_prompt,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n do_classifier_free_guidance,\n )\n\n # 8. 
Check that sizes of mask, masked image and latents match\n if num_channels_unet == 9:\n # default case for runwayml/stable-diffusion-inpainting\n num_channels_mask = mask.shape[1]\n num_channels_masked_image = masked_image_latents.shape[1]\n if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:\n raise ValueError(\n f\"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects\"\n f\" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +\"\n f\" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}\"\n f\" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of\"\n \" `pipeline.unet` or your `mask_image` or `image` input.\"\n )\n elif num_channels_unet != 4:\n raise ValueError(\n f\"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}.\"\n )\n # 8.1 Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 8.2 Create tensor stating which controlnets to keep\n controlnet_keep = []\n for i in range(len(timesteps)):\n keeps = [\n 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)\n for s, e in zip(control_guidance_start, control_guidance_end)\n ]\n if isinstance(self.controlnet, MultiControlNetModel):\n controlnet_keep.append(keeps)\n else:\n controlnet_keep.append(keeps[0])\n\n # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n height, width = latents.shape[-2:]\n height = height * self.vae_scale_factor\n width = width * self.vae_scale_factor\n\n original_size = original_size or (height, width)\n target_size = target_size or (height, width)\n\n # 10. Prepare added time ids & embeddings\n add_text_embeds = pooled_prompt_embeds\n add_time_ids, add_neg_time_ids = self._get_add_time_ids(\n original_size,\n crops_coords_top_left,\n target_size,\n aesthetic_score,\n negative_aesthetic_score,\n dtype=prompt_embeds.dtype,\n )\n add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)\n\n if do_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)\n add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)\n add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)\n add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)\n\n prompt_embeds = prompt_embeds.to(device)\n add_text_embeds = add_text_embeds.to(device)\n add_time_ids = add_time_ids.to(device)\n\n # 11. 
Denoising loop\n num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)\n\n if (\n denoising_end is not None\n and denoising_start is not None\n and denoising_value_valid(denoising_end)\n and denoising_value_valid(denoising_start)\n and denoising_start >= denoising_end\n ):\n raise ValueError(\n f\"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: \"\n + f\" {denoising_end} when using type float.\"\n )\n elif denoising_end is not None and denoising_value_valid(denoising_end):\n discrete_timestep_cutoff = int(\n round(\n self.scheduler.config.num_train_timesteps\n - (denoising_end * self.scheduler.config.num_train_timesteps)\n )\n )\n num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))\n timesteps = timesteps[:num_inference_steps]\n\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n added_cond_kwargs = {\"text_embeds\": add_text_embeds, \"time_ids\": add_time_ids}\n\n # controlnet(s) inference\n if guess_mode and do_classifier_free_guidance:\n # Infer ControlNet only for the conditional batch.\n control_model_input = latents\n control_model_input = self.scheduler.scale_model_input(control_model_input, t)\n controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]\n controlnet_added_cond_kwargs = {\n \"text_embeds\": add_text_embeds.chunk(2)[1],\n \"time_ids\": add_time_ids.chunk(2)[1],\n }\n else:\n control_model_input = latent_model_input\n controlnet_prompt_embeds = prompt_embeds\n controlnet_added_cond_kwargs = added_cond_kwargs\n\n if isinstance(controlnet_keep[i], list):\n cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]\n else:\n controlnet_cond_scale = controlnet_conditioning_scale\n if isinstance(controlnet_cond_scale, list):\n controlnet_cond_scale = controlnet_cond_scale[0]\n cond_scale = controlnet_cond_scale * controlnet_keep[i]\n\n # # Resize control_image to match the size of the input to the controlnet\n # if control_image.shape[-2:] != control_model_input.shape[-2:]:\n # control_image = F.interpolate(control_image, size=control_model_input.shape[-2:], mode=\"bilinear\", align_corners=False)\n\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n control_model_input,\n t,\n encoder_hidden_states=controlnet_prompt_embeds,\n controlnet_cond=control_image,\n conditioning_scale=cond_scale,\n guess_mode=guess_mode,\n added_cond_kwargs=controlnet_added_cond_kwargs,\n return_dict=False,\n )\n\n if guess_mode and do_classifier_free_guidance:\n # Infered ControlNet only for the conditional batch.\n # To apply the output of ControlNet to both the unconditional and conditional batches,\n # add 0 to the unconditional batch to keep it unchanged.\n down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]\n mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])\n\n if num_channels_unet == 9:\n latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n 
encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample,\n added_cond_kwargs=added_cond_kwargs,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n if do_classifier_free_guidance and guidance_rescale > 0.0:\n print(\"rescale: \", guidance_rescale)\n # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf\n noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n if num_channels_unet == 4:\n init_latents_proper = image_latents[:1]\n init_mask = mask[:1]\n\n if i < len(timesteps) - 1:\n noise_timestep = timesteps[i + 1]\n init_latents_proper = self.scheduler.add_noise(\n init_latents_proper, noise, torch.tensor([noise_timestep])\n )\n\n latents = (1 - init_mask) * init_latents_proper + init_mask * latents\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n # make sure the VAE is in float32 mode, as it overflows in float16\n if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:\n self.upcast_vae()\n latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)\n\n # If we do sequential model offloading, let's offload unet and controlnet\n # manually for max memory savings\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.unet.to(\"cpu\")\n self.controlnet.to(\"cpu\")\n torch.cuda.empty_cache()\n\n if not output_type == \"latent\":\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\n else:\n return StableDiffusionXLPipelineOutput(images=latents)\n\n # apply watermark if available\n if self.watermark is not None:\n image = self.watermark.apply_watermark(image)\n\n image = self.image_processor.postprocess(image, output_type=output_type)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (image,)\n\n return StableDiffusionXLPipelineOutput(images=image)" } ]
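Note on the snippet above: its denoising loop combines the unconditional and text-conditioned noise predictions (classifier-free guidance) and then optionally calls rescale_noise_cfg, citing Sec. 3.4 of arXiv:2305.08891. As a hedged illustration only (the helper name and reduction dimensions below are assumptions based on that paper's common formulation, not taken from this record), a minimal sketch of such a rescale:

import torch

def rescale_noise_cfg_sketch(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    # Match the per-sample std of the guided prediction to the text-conditioned
    # branch, then blend with the unrescaled guidance by `guidance_rescale`.
    dims = list(range(1, noise_pred_text.ndim))
    std_text = noise_pred_text.std(dim=dims, keepdim=True)
    std_cfg = noise_cfg.std(dim=dims, keepdim=True)
    rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * rescaled + (1.0 - guidance_rescale) * noise_cfg

In the loop shown above, this would be applied to noise_pred immediately after the guidance_scale combination, at the point where the snippet calls rescale_noise_cfg.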
import torch import numpy as np import os import pickle from diffusers import ControlNetModel, AutoencoderKL from PIL import Image from tqdm.auto import tqdm from transformers import pipeline as transformers_pipeline from relighting.pipeline import CustomStableDiffusionControlNetInpaintPipeline from relighting.pipeline_inpaintonly import CustomStableDiffusionInpaintPipeline, CustomStableDiffusionXLInpaintPipeline from relighting.argument import SAMPLERS, VAE_MODELS, DEPTH_ESTIMATOR, get_control_signal_type from relighting.image_processor import ( estimate_scene_depth, estimate_scene_normal, merge_normal_map, fill_depth_circular ) from relighting.ball_processor import get_ideal_normal_ball, crop_ball from relighting.pipeline_xl import CustomStableDiffusionXLControlNetInpaintPipeline
17777
class NoWaterMark: def apply_watermark(self, *args, **kwargs): return args[0] class ControlSignalGenerator(): def __init__(self, sd_arch, control_signal_type, device): self.sd_arch = sd_arch self.control_signal_type = control_signal_type self.device = device def process_sd_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", device=self.device.index) control_image = self.depth_estimator(input_image)['depth'] control_image = np.array(control_image) control_image = control_image[:, :, None] control_image = np.concatenate([control_image, control_image, control_image], axis=2) control_image = Image.fromarray(control_image) control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sdxl_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) control_image = estimate_scene_depth(input_image, depth_estimator=self.depth_estimator) xs = [x] if not isinstance(x, list) else x ys = [y] if not isinstance(y, list) else y rs = [r] if not isinstance(r, list) else r for x, y, r in zip(xs, ys, rs): #print(f"depth at {x}, {y}, {r}") control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sd_normal(self, input_image, normal_ball, mask_ball, x, y, r=None, normal_ball_path=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) normal_scene = estimate_scene_normal(input_image, depth_estimator=self.depth_estimator)
class NoWaterMark: def apply_watermark(self, *args, **kwargs): return args[0] class ControlSignalGenerator(): def __init__(self, sd_arch, control_signal_type, device): self.sd_arch = sd_arch self.control_signal_type = control_signal_type self.device = device def process_sd_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", device=self.device.index) control_image = self.depth_estimator(input_image)['depth'] control_image = np.array(control_image) control_image = control_image[:, :, None] control_image = np.concatenate([control_image, control_image, control_image], axis=2) control_image = Image.fromarray(control_image) control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sdxl_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) control_image = estimate_scene_depth(input_image, depth_estimator=self.depth_estimator) xs = [x] if not isinstance(x, list) else x ys = [y] if not isinstance(y, list) else y rs = [r] if not isinstance(r, list) else r for x, y, r in zip(xs, ys, rs): #print(f"depth at {x}, {y}, {r}") control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sd_normal(self, input_image, normal_ball, mask_ball, x, y, r=None, normal_ball_path=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) normal_scene = estimate_scene_normal(input_image, depth_estimator=self.depth_estimator)
normal_image = merge_normal_map(normal_scene, normal_ball, mask_ball, x, y)
9
2023-12-07 14:03:31+00:00
24k
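The record above revolves around building depth control signals for the inpainting pipelines (process_sd_depth / process_sdxl_depth in its cropped_code). As a small, hedged sketch of that preparation step (the helper name is hypothetical, and the fill_depth_circular step that patches the chrome-ball region is omitted):

import numpy as np
from PIL import Image

def depth_to_control_image(depth_map):
    # Replicate a single-channel depth estimate into three channels so it can
    # be consumed as an RGB control image, as the depth branch above does.
    depth = np.asarray(depth_map)
    if depth.ndim == 2:
        depth = depth[:, :, None]
    control = np.concatenate([depth, depth, depth], axis=2)
    return Image.fromarray(control.astype(np.uint8))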
modelscope/normal-depth-diffusion
ldm/models/diffusion/wovae_ddpm.py
[ { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key='image',\n colorize_nlabels=None,\n monitor=None,\n prior_model=None,\n prior_normal=None,\n using_rgb=True):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n self.prior_model = prior_model\n self.using_rgb = using_rgb\n\n assert ddconfig['double_z']\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig['z_channels'],\n 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim,\n ddconfig['z_channels'], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer('colorize',\n torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n if prior_model is not None:\n self.prior_model = instantiate_from_config(prior_model)\n if prior_normal is not None:\n self.prior_normal = instantiate_from_config(prior_normal)\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n try:\n sd = torch.load(path, map_location='cpu')['state_dict']\n except:\n sd = torch.load(path, map_location='cpu')\n\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print('Deleting key {} from state_dict.'.format(k))\n del sd[k]\n m, u = self.load_state_dict(sd, strict=False)\n if len(m) > 0:\n print('missing keys:')\n print(m)\n if len(u) > 0:\n print('unexpected keys:')\n print(u)\n\n print(f'Restored from {path}')\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def prior_to_eval(self):\n\n if self.prior_model is not None:\n self.prior_model.eval()\n\n if self.prior_normal is not None:\n self.prior_normal.eval()\n\n @torch.no_grad()\n def prior_inference(self, inputs, prior_inputs):\n # depth prior model\n # midas or zoe is 384 model\n prior_results = {}\n\n self.prior_to_eval()\n\n model_prior_results = self.prior_model(prior_inputs)\n prior_results.update(model_prior_results)\n\n # using normal map\n if not self.using_rgb:\n normal_prior = self.prior_normal(prior_inputs)\n prior_results.update(normal_prior)\n\n resize_prior_results = {}\n _, __, h, w = inputs.shape\n\n for key in prior_results.keys():\n resize_prior_results[key] = F.interpolate(\n prior_results[key], (w, h), mode='bilinear')\n\n if self.using_rgb:\n return torch.cat([inputs, resize_prior_results['depth']], dim=1)\n else:\n return torch.cat([\n resize_prior_results['normal'], resize_prior_results['depth']\n ],\n dim=1)\n\n def forward(self, input, sample_posterior=True):\n\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1,\n 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n\n inputs = self.get_input(batch, self.image_key)\n if self.prior_model is not None:\n inputs = 
self.prior_inference(inputs, batch['prior'])\n\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split='train')\n\n self.log(\n 'rec_loss',\n log_dict_ae['train/rec_loss'],\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n self.log(\n 'aeloss',\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n self.log_dict(\n log_dict_ae,\n prog_bar=False,\n logger=True,\n on_step=True,\n on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split='train')\n\n self.log(\n 'discloss',\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n self.log_dict(\n log_dict_disc,\n prog_bar=False,\n logger=True,\n on_step=True,\n on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split='val')\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split='val')\n\n self.log('val/rec_loss', log_dict_ae['val/rec_loss'])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n @torch.no_grad()\n def test_step(self, batch, batch_idx):\n pass\n\n @torch.no_grad()\n def sample_imgs(self, batch):\n '''using to test for sampling image\n\n '''\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n return {'samples': reconstructions}\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters()) + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9))\n\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n xrec = repeat(xrec[:, 0, ...], 'b h w -> b c h w', c=3)\n\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n samples = self.decode(torch.randn_like(posterior.sample()))\n samples = repeat(samples[:, 0, ...], 'b h w -> b c h w', c=3)\n log['samples'] = samples\n\n log['reconstructions'] = xrec\n log['inputs'] = x\n return log\n\n @torch.no_grad()\n def log_rgbd(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n\n if x.shape[1] == 3:\n if self.prior_model is not None:\n x = self.prior_inference(x, batch['prior'])\n\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n samples = self.decode(torch.randn_like(posterior.sample()))\n log['samples'] = samples\n log['reconstructions'] = xrec\n 
log['inputs'] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == 'segmentation'\n if not hasattr(self, 'colorize'):\n self.register_buffer('colorize',\n torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.\n return x" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n\n def __init__(self, model, schedule='linear', **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n def make_schedule(self,\n ddim_num_steps,\n ddim_discretize='uniform',\n ddim_eta=0.,\n verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[\n 0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model\n .device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev',\n to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod',\n to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod',\n to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod',\n to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod',\n to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod',\n to_torch(np.sqrt(1. 
/ alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas',\n np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) *\n (1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps',\n sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n **kwargs):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(\n 0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[\n 0]\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in 
enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b, ), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n **kwargs):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat(\n [unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n elif isinstance(c[k], torch.Tensor):\n c_in[k] = torch.cat(\n [unconditional_conditioning[k], c[k]])\n else:\n assert c[k] == unconditional_conditioning[k]\n c_in[k] = c[k]\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(\n torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in,\n c_in).chunk(2)\n # model_t = self.model.apply_model(x, t, c, **kwargs)\n # model_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n model_output = model_uncond + unconditional_guidance_scale * (\n model_t - model_uncond)\n\n if self.model.parameterization == 'v':\n print('using v!')\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == 'eps', 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c,\n **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n 
sqrt_one_minus_at = torch.full((b, 1, 1, 1),\n sqrt_one_minus_alphas[index],\n device=device)\n\n # current prediction for x_0\n if self.model.parameterization != 'v':\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device,\n repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape)\n * noise)\n\n @torch.no_grad()\n def decode(self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n **kwargs):\n\n timesteps = np.arange(self.ddpm_num_timesteps\n ) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0], ),\n step,\n device=x_latent.device,\n dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n return x_dec" }, { "identifier": "DPMSolverSampler", "path": "ldm/models/diffusion/dpm_solver/sampler.py", "snippet": "class DPMSolverSampler(object):\n\n def __init__(self, model, **kwargs):\n super().__init__()\n self.model = model\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.\n device)\n self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n\n # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')\n\n device = self.model.betas.device\n if x_T is None:\n img = torch.randn(size, device=device)\n else:\n img = x_T\n\n ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)\n\n model_fn = model_wrapper(\n lambda x, t, c: self.model.apply_model(x, t, c),\n ns,\n model_type='noise',\n guidance_type='classifier-free',\n condition=conditioning,\n unconditional_condition=unconditional_conditioning,\n guidance_scale=unconditional_guidance_scale,\n )\n\n dpm_solver = DPM_Solver(\n model_fn, ns, predict_x0=True, thresholding=False)\n x = dpm_solver.sample(\n img,\n steps=S,\n skip_type='time_uniform',\n method='multistep',\n order=2,\n lower_order_final=True)\n\n return x.to(device), None" }, { "identifier": "PLMSSampler", "path": "ldm/models/diffusion/plms.py", "snippet": "class PLMSSampler(object):\n\n def __init__(self, model, schedule='linear', **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n def make_schedule(self,\n ddim_num_steps,\n ddim_discretize='uniform',\n ddim_eta=0.,\n verbose=True):\n if ddim_eta != 0:\n raise ValueError('ddim_eta must be 0 for PLMS')\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[\n 0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model\n .device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev',\n to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod',\n to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod',\n to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod',\n to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod',\n to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod',\n to_torch(np.sqrt(1. 
/ alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas',\n np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) *\n (1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps',\n sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for PLMS sampling is {size}')\n\n samples, intermediates = self.plms_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def plms_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = list(reversed(range(\n 0, timesteps))) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[\n 0]\n print(f'Running PLMS Sampling with {total_steps} 
timesteps')\n\n iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)\n old_eps = []\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b, ), step, device=device, dtype=torch.long)\n ts_next = torch.full((b, ),\n time_range[min(i + 1,\n len(time_range) - 1)],\n device=device,\n dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_plms(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n old_eps=old_eps,\n t_next=ts_next)\n img, pred_x0, e_t = outs\n old_eps.append(e_t)\n if len(old_eps) >= 4:\n old_eps.pop(0)\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_plms(self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n old_eps=None,\n t_next=None):\n b, *_, device = *x.shape, x.device\n\n def get_model_output(x, t):\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in,\n c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (\n e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == 'eps'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c,\n **corrector_kwargs)\n\n return e_t\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n\n def get_x_prev_and_pred_x0(e_t, index):\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1),\n alphas_prev[index],\n device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1),\n sqrt_one_minus_alphas[index],\n device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device,\n repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n e_t = get_model_output(x, t)\n if len(old_eps) == 0:\n # Pseudo Improved Euler (2nd order)\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)\n e_t_next = get_model_output(x_prev, t_next)\n e_t_prime = (e_t + e_t_next) / 2\n elif len(old_eps) == 1:\n # 2nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (3 * e_t - old_eps[-1]) / 2\n elif len(old_eps) == 2:\n # 3nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12\n elif len(old_eps) >= 3:\n # 4nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2]\n - 9 * old_eps[-3]) / 24\n\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)\n\n return x_prev, pred_x0, e_t" }, { "identifier": "CrossAttention", "path": "ldm/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n\n def __init__(self,\n query_dim,\n context_dim=None,\n heads=8,\n dim_head=64,\n dropout=0.):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))\n\n def forward(self, x, context=None, mask=None):\n h = self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h),\n (q, k, v))\n\n sim = einsum('b i d, b j d -> b i j', q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, 'b ... 
-> b (...)')\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, 'b j -> (b h) () j', h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum('b i j, b j d -> b i d', attn, v)\n out = rearrange(out, '(b h) n d -> b n (h d)', h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1, ) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule,\n n_timestep,\n linear_start=1e-4,\n linear_end=2e-2,\n cosine_s=8e-3):\n if schedule == 'linear':\n betas = (\n torch.linspace(\n linear_start**0.5,\n linear_end**0.5,\n n_timestep,\n dtype=torch.float64)**2)\n\n elif schedule == 'cosine':\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep\n + cosine_s)\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == 'sqrt_linear':\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == 'sqrt':\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64)**0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1, ) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(\n self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(\n self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar\n + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: 
https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, 'at least one argument must be a Tensor'\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (-1.0 + logvar2 - logvar1 + torch.exp(logvar1 - logvar2) +\n ((mean1 - mean2)**2) * torch.exp(-logvar2))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n 'num_updates',\n torch.tensor(0, dtype=torch.int)\n if use_num_upates else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) /\n (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(\n m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay *\n (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(\n shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(\n f'{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.'\n )\n return total_params" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "filter_nan_loss", "path": "ldm/util.py", "snippet": "def filter_nan_loss(loss):\n fake_loss = torch.isnan(loss)\n loss = loss[torch.logical_not(fake_loss)]\n\n if loss.shape[0] == 0:\n return loss.sum()\n else:\n return loss" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not 'target' in config:\n\n print(config)\n if config == '__is_first_stage__':\n return None\n elif config == '__is_unconditional__':\n return None\n raise KeyError('Expected key `target` to instantiate.')\n return get_obj_from_str(config['target'])(**config.get('params', dict()))" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=20):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new('RGB', wh, color='white')\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(10 * (wh[0] / 256))\n lines = '\\n'.join(xc[bi][start:start + nc]\n for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill='black', font=font)\n except UnicodeEncodeError:\n print('Cant encode string for logging. Skipping.')\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
import pdb import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn import torch.nn.functional as F from contextlib import contextmanager from functools import partial from einops import rearrange, repeat from ldm.models.autoencoder import (AutoencoderKL, IdentityFirstStage, VQModelInterface) from ldm.models.diffusion.ddim import DDIMSampler from ldm.models.diffusion.dpm_solver import DPMSolverSampler from ldm.models.diffusion.plms import PLMSSampler from ldm.modules.attention import CrossAttention from ldm.modules.diffusionmodules.util import (extract_into_tensor, make_beta_schedule, noise_like) from ldm.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl) from ldm.modules.ema import LitEma from ldm.util import (count_params, default, exists, filter_nan_loss, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat) from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from pytorch_lightning.utilities.distributed import rank_zero_only from pytorch_lightning.utilities.rank_zero import rank_zero_only
16038
return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log['inputs'] = x log['reconstruction'] = decode_show(xrec) if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, 'decode'): xc = self.cond_stage_model.decode(c) log['conditioning'] = xc elif self.cond_stage_key in ['caption']: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch['caption']) log['conditioning'] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch['human_label']) log['conditioning'] = xc elif isimage(xc): log['conditioning'] = xc if ismap(xc): log['original_conditioning'] = self.to_rgb(xc) if plot_diffusion_rows: # z_noise space # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): # 200 if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack( diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid( diffusion_grid, nrow=diffusion_row.shape[0]) log['diffusion_row'] = diffusion_grid[None] if sample: # get denoise row with self.ema_scope('Plotting'): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, **kwargs['sampler_kwargs']) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) # only_depth log['samples'] = decode_show(x_samples) if plot_denoise_rows: # C H W denoise_grid = self._get_denoise_row_from_list( z_denoise_row['pred_x0']) log['denoise_row'] = decode_show(denoise_grid[None, ])[0] if plot_progressive_rows: with self.ema_scope('Plotting Progressives'): img, progressives = self.progressive_denoising( c, shape=(self.channels, self.image_size, self.image_size), batch_size=N) prog_row = self._get_denoise_row_from_list( progressives, desc='Progressive Generation') log['progressive_row'] = prog_row if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log @torch.no_grad() def sample_imgs(self, batch, ddim_steps=50, scale=7.5, uc=None, use_ddim=True, ddim_eta=1., solver='plms'): ''' test sample image; from coco caption ''' def decode_show(img): return img log = dict() N = len(batch['caption']) print(batch['caption']) z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = x.shape[0] uc = self.get_learned_conditioning(N * ['']) with self.ema_scope('Plotting'): if sovler == 'plms':
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers this is without vae ddpm -- merci """ try: except: __conditioning_keys__ = { 'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y' } def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class anneal_identity(): def __call__(self, x, global_step): return x def upper_bound(arr, key): left = 0 right = len(arr) while left < right: mid = (left + right) >> 1 if arr[mid] < key: left = mid + 1 else: right = mid return left class anneal_warmup(): def __init__(self, anneal_ratio, anneal_global_step, num_steps): self.anneal_ratio = anneal_ratio self.anneal_global_step = anneal_global_step self.steps = num_steps // (len(anneal_global_step) + 1) self.start_steps = self.steps def __call__(self, x, global_step): if (torch.rand(1) > self.anneal_ratio).item(): return x else: return int(self.start_steps + self.start_steps * upper_bound(self.anneal_global_step, global_step)) class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule='linear', loss_type='l2', ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor='val/loss', use_ema=True, first_stage_key='image', image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization='eps', # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., anneal_t=False, # we find at the begining, smaller t, larger denoise mse loss. anneal_global_step=[], anneal_ratio=0.9, prior_model=None, prior_normal=None, input_keys=['rgb'], ): super().__init__() assert parameterization in [ 'eps', 'x0' ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f'{self.__class__.__name__}: Running in {self.parameterization}-prediction mode' ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.') self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.input_keys = input_keys if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full( fill_value=logvar_init, size=(self.num_timesteps, )) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) ### anneal t function if not anneal_t: self.anneal_func = anneal_identity() else: self.anneal_func = anneal_warmup(anneal_ratio, anneal_global_step, self.num_timesteps) if prior_model is not None: self.prior_model = instantiate_from_config(prior_model) else: self.prior_model = None if prior_normal is not None: self.prior_normal = instantiate_from_config(prior_normal) else: self.prior_normal = None def register_schedule(self, given_betas=None, beta_schedule='linear', timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[ 0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( 'posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer( 'posterior_mean_coef1', to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer( 'posterior_mean_coef2', to_torch((1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == 'eps': lvlb_weights = self.betas**2 / (2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == 'x0': lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / ( 2. * 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError('mu not supported') # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f'{context}: Switched to EMA weights') try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f'{context}: Restored training weights') def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location='cpu') if 'state_dict' in list(sd.keys()): sd = sd['state_dict'] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print('Deleting key {} from state_dict.'.format(k)) del sd[k] missing, unexpected = self.load_state_dict( sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print( f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' ) if len(missing) > 0: print(f'Missing Keys: {missing}') if len(unexpected) > 0: print(f'Unexpected Keys: {unexpected}') if self.use_ema: if len(missing) > 0: model_ema_str = sorted(missing)[-1] # missing model_ema if 'model_ema' in model_ema_str: print(f'Reinitialize model_ema') self.model_ema = LitEma(self.model) print( f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.' ) else: if self.ema_copy == True: print(f'Reinitialize model_ema') self.model_ema = LitEma(self.model) print( f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.' ) def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == 'eps': x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == 'x0': x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape( b, *((1, ) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample( img, torch.full((b, ), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss( target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == 'eps': target = noise elif 
self.parameterization == 'x0': target = x_start else: raise NotImplementedError( f'Paramterization {self.parameterization} not yet supported') loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0], ), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict # property of model for (to, cuda, cpu, float, half, ...) def to(self, *args, **kwargs): # type: ignore[valid-type] """See :meth:`torch.nn.Module.to`.""" # this converts `str` device to `torch.device` if self.prior_model is not None: self.prior_model.to(*args, **kwargs) if self.prior_normal is not None: self.prior_normal.to(*args, **kwargs) return super().to(*args, **kwargs) def cuda(self, device=None): # type: ignore[valid-type] """Moves all model parameters and buffers to the GPU. This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. Arguments: device: If specified, all parameters will be copied to that device. If `None`, the current CUDA device index will be used. 
Returns: Module: self """ if device is None: device = torch.device('cuda', torch.cuda.current_device()) elif isinstance(device, int): device = torch.device('cuda', index=device) if self.prior_model is not None: self.prior_model.cuda(device) if self.prior_normal is not None: self.prior_normal.cuda(device) return super().cuda(device=device) def cpu(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.cpu`.""" if self.prior_model is not None: self.prior_model.cpu() if self.prior_normal is not None: self.prior_normal.cpu() return super().cpu() def float(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.float`.""" if self.prior_model is not None: self.prior_model.float() if self.prior_normal is not None: self.prior_normal.float() return super().float() def double(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.double`.""" if self.prior_model is not None: self.prior_model.double() if self.prior_normal is not None: self.prior_normal.double() return super().double() def half(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.half`.""" if self.prior_model is not None: self.prior_model.half() if self.prior_normal is not None: self.prior_normal.half() return super().half() def prior_to_eval(self): if self.prior_model is not None: self.prior_model.eval() if self.prior_normal is not None: self.prior_normal.eval() @torch.no_grad() def prior_inference(self, inputs, prior_inputs): # depth prior model # midas or zoe is 384 model inputs = inputs.permute(0, 3, 1, 2) prior_results = {} self.prior_to_eval() # using depth prior if self.prior_model is not None: model_prior_results = self.prior_model(prior_inputs) prior_results.update(model_prior_results) # using normal map if self.prior_normal is not None: normal_prior_results = self.prior_normal(prior_inputs) prior_results.update(normal_prior_results) resize_prior_results = {} _, __, h, w = inputs.shape for key in prior_results.keys(): resize_prior_results[key] = F.interpolate( prior_results[key], (w, h), mode='bilinear') # add a rgb input resize_prior_results.update({'rgb': inputs}) input_container = [] for key in self.input_keys: input_container.append(resize_prior_results[key]) return torch.cat(input_container, dim=1).permute(0, 2, 3, 1) @torch.no_grad() def collect_inputs(self, batch): input_container = [] for key in self.input_keys: # [B H W C] input_container.append(batch[key]) return torch.cat(input_container, dim=-1) def training_step(self, batch, batch_idx): if self.prior_model is not None: batch['image'] = self.prior_inference(batch['image'], batch['prior']) # image_condition batch['ic'] = batch['image'][..., :3] else: batch['image'] = self.collect_inputs(batch) # image_condition batch['ic'] = batch['image'][..., :3] loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log( 'global_step', self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log( 'lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): if self.prior_model is not None: batch['image'] = self.prior_inference(batch['image'], batch['prior']) # image_condition batch['ic'] = batch['image'][..., :3] else: batch['image'] = self.collect_inputs(batch) # image_condition batch['ic'] = batch['image'][..., :3] _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, 
loss_dict_ema = self.shared_step(batch) loss_dict_ema = { key + '_ema': loss_dict_ema[key] for key in loss_dict_ema } self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) @torch.no_grad() def test_step(self, batch, batch_idx): if self.prior_model is not None: batch['image'] = self.prior_inference(batch['image'], batch['prior']) # image_condition batch['ic'] = batch['image'][..., :3] else: batch['image'] = self.collect_inputs(batch) # image_condition batch['ic'] = batch['image'][..., :3] with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = { key + '_ema': loss_dict_ema[key] for key in loss_dict_ema } self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): # args: outputs, batch, batch_idx if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log['inputs'] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log['diffusion_row'] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope('Plotting'): samples, denoise_row = self.sample( batch_size=N, return_intermediates=True) log['samples'] = samples log['denoise_row'] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key='image', cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, first_stage_ckpts=None, without_crossattn=False, ema_copy=False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop('ckpt_path', None) ignore_keys = kwargs.pop('ignore_keys', []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = 
len( first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.first_stage_ckpts = first_stage_ckpts # VAE Load self.instantiate_first_stage(first_stage_config) # CLIP load self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False self.ema_copy = ema_copy if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if self.first_stage_ckpts is not None: first_stage_ckpts = torch.load( self.first_stage_ckpts, map_location='cpu') no_match = self.first_stage_model.load_state_dict( first_stage_ckpts['state_dict'], strict=False) print('encode-decode, no match keys:\n {}'.format(no_match)) for param in self.first_stage_model.parameters(): param.requires_grad = False # lambda-stage-1 without crossattn if without_crossattn: for m in self.modules(): if isinstance(m, CrossAttention): for para in m.parameters(): para.requires_grad = False # RuntimeError: One of the differentiated Tensors does not require grad def make_cond_schedule(self, ): self.cond_ids = torch.full( size=(self.num_timesteps, ), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print('### USING STD-RESCALING ###') x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f'setting self.scale_factor to {self.scale_factor}') print('### USING STD-RESCALING ###') def register_schedule(self, given_betas=None, beta_schedule='linear', timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == '__is_first_stage__': print('Using first stage also as cond stage.') self.cond_stage_model = self.first_stage_model elif config == '__is_unconditional__': print( f'Training {self.__class__.__name__} as an unconditional model.' 
) self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): ''' # CLIP embedding ''' if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable( self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params['clip_min_weight'], self.split_input_params['clip_max_weight'], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params['tie_braker']: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params['clip_min_tie_weight'], self.split_input_params['clip_max_tie_weight']) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, 
dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting ''' @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out ''' @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.1): ''' we add uncondition prompts to improve classifer-free guidance results ''' x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) ''' 
encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() ''' _, _c, _h, _w = x.shape z = F.interpolate( x, (_w // 8, _h // 8), mode='bilinear', align_corners=False) if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} # To support classifier-free guidance, randomly drop out only text conditioning 10% like sd-v1.5 random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < uncond, 'n -> n 1 1') null_prompts = self.get_learned_conditioning(['']).to(c.device) cc = torch.where(prompt_mask, null_prompts, c) out = [z, cc] if return_first_stage_outputs: xrec = F.interpolate( z, (_w, _h), mode='bilinear', align_corners=False) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry( z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, 'split_input_params'): if self.split_input_params['patch_distributed_vq']: ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. (64, 64) uf = self.split_input_params['vqf'] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print('reducing Kernel') if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print('reducing stride') fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack( output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry( z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, 'split_input_params'): if self.split_input_params['patch_distributed_vq']: ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. (64, 64) uf = self.split_input_params['vqf'] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print('reducing Kernel') if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print('reducing stride') fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack( output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, 'split_input_params'): if self.split_input_params['patch_distributed_vq']: ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. 
(64, 64) df = self.split_input_params['vqf'] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print('reducing Kernel') if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print('reducing stride') fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) output_list = [ self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): # obtain encode(x), conditon x, c = self.get_input(batch, self.first_stage_key) # ddpm loss = self(x, c) return loss def guassian_distributed(self, x, sigma=100): y = torch.exp(-(x)**2 / (2 * sigma**2)) return y / y.sum() def forward(self, x, c, *args, **kwargs): # anneal t finetune num_timesteps = self.anneal_func(self.num_timesteps, self.global_step) t = torch.randint( 0, num_timesteps, (x.shape[0], ), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample( x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, 'split_input_params'): assert len( cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold( x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in [ 'image', 'LR_image', 'segmentation', 'bbox_img', 'ic' ] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1 ) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{ c_key: [c[:, :, :, :, i]] } for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params[ 'original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2**(num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ (rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ (x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor( self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance( cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([ torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd ]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange( adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view( (o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * 
ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor( [self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == 'x0': target = x_start elif self.parameterization == 'eps': target = noise else: raise NotImplementedError() loss_simple = self.get_loss( model_output, target, mean=False).mean([1, 2, 3]) valid_mask = torch.isnan(loss_simple) == False valid_t = t[valid_mask] loss_simple = filter_nan_loss(loss_simple) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[valid_t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss( model_output, target, mean=False).mean(dim=(1, 2, 3)) valid_mask = torch.isnan(loss_vlb) == False valid_t = t[valid_mask] loss_vlb = filter_nan_loss(loss_vlb) loss_vlb = (self.lvlb_weights[valid_t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model( x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == 'eps' model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == 'eps': x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == 'x0': x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
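# For reference (derivation sketch): with the 'eps' parameterization,
# predict_start_from_noise inverts the forward q_sample relation
#     x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * eps
# giving
#     x_0 = sqrt(1 / a_bar_t) * x_t - sqrt(1 / a_bar_t - 1) * eps,
# which is what the registered sqrt_recip_alphas_cumprod and
# sqrt_recipm1_alphas_cumprod buffers implement; the optional clamp above then
# keeps the reconstruction in the [-1, 1] data range before q_posterior is applied.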
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance( x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning('Support dropped.') model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape( b, *((1, ) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * ( 0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * ( 0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * ( 0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = [c[:batch_size] for c in cond] if isinstance( cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm( reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed(range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b, ), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample( x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask 
+ (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm( reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2: 3] # spatial size has to match for i in iterator: ts = torch.full((b, ), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample( x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = [c[:batch_size] for c in cond] if isinstance( cond, list) else cond[:batch_size] return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): ''' samples_ddim, _ = sampler.sample(S=opt.ddim_steps, conditioning=c, batch_size=opt.n_samples, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code) ''' if ddim: sampler_type = kwargs.pop('type') if sampler_type == 'dpmsolver': ddim_sampler = DPMSolverSampler(self) elif sampler_type == 'ddim': ddim_sampler = DDIMSampler(self) elif sampler_type == 'plms': ddim_sampler = DDIMSampler(self) else: raise NotImplementedError if 'unconditional_guidance_scale' in kwargs: scale = kwargs['unconditional_guidance_scale'] if scale != 1.0: if not cond.shape[1] == 1: # prompt condition uc = self.get_learned_conditioning(batch_size * ['']) else: # image condition uc = torch.zeros_like(cond) kwargs['unconditional_conditioning'] = uc shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) 
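# Note on the guidance setup above (the combination rule is the standard
# classifier-free guidance formula applied inside the samplers, shown here only
# as a sketch): when unconditional_guidance_scale != 1.0, an "empty" conditioning
# uc (empty-prompt embeddings for text conditions, zeros for image conditions)
# is passed as unconditional_conditioning, and the sampler blends the two
# predictions as
#     eps = eps_uncond + scale * (eps_cond - eps_uncond)
# so scale = 1.0 reduces to the purely conditional prediction.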
return samples, intermediates @torch.no_grad() def log_rgbd(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=False, plot_denoise_rows=False, plot_progressive_rows=False, plot_diffusion_rows=True, use_ddim=True, **kwargs): def decode_show(img): return img if batch['image'].shape[-1] == 3: if self.prior_model is not None: batch['image'] = self.prior_inference(batch['image'], batch['prior']) batch['ic'] = batch['image'][..., :3] else: batch['image'] = self.collect_inputs(batch) # image_condition batch['ic'] = batch['image'][..., :3] use_ddim = use_ddim log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log['inputs'] = x log['reconstruction'] = decode_show(xrec) if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, 'decode'): xc = self.cond_stage_model.decode(c) log['conditioning'] = xc elif self.cond_stage_key in ['caption']: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch['caption']) log['conditioning'] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch['human_label']) log['conditioning'] = xc elif isimage(xc): log['conditioning'] = xc if ismap(xc): log['original_conditioning'] = self.to_rgb(xc) if plot_diffusion_rows: # z_noise space # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): # 200 if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack( diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid( diffusion_grid, nrow=diffusion_row.shape[0]) log['diffusion_row'] = diffusion_grid[None] if sample: # get denoise row with self.ema_scope('Plotting'): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, **kwargs['sampler_kwargs']) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) # only_depth log['samples'] = decode_show(x_samples) if plot_denoise_rows: # C H W denoise_grid = self._get_denoise_row_from_list( z_denoise_row['pred_x0']) log['denoise_row'] = decode_show(denoise_grid[None, ])[0] if plot_progressive_rows: with self.ema_scope('Plotting Progressives'): img, progressives = self.progressive_denoising( c, shape=(self.channels, self.image_size, self.image_size), batch_size=N) prog_row = self._get_denoise_row_from_list( progressives, desc='Progressive Generation') log['progressive_row'] = prog_row if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log @torch.no_grad() def sample_imgs(self, batch, ddim_steps=50, scale=7.5, uc=None, use_ddim=True, ddim_eta=1., solver='plms'): ''' test sample image; from coco caption ''' def decode_show(img): return img log = dict() N = len(batch['caption']) print(batch['caption']) z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, 
return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = x.shape[0] uc = self.get_learned_conditioning(N * ['']) with self.ema_scope('Plotting'): if solver == 'plms':
sampler = PLMSSampler(self)
5
2023-12-06 07:29:34+00:00
24k
RobertCsordas/moe_attention
tasks/simple/language_model/transformer_lm_mixin.py
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: bool, layers: List[torch.nn.Module], n_prev_states: int,\n n_prev_states_test: Optional[int] = None, adaptive_cutoffs: List[int] = [],\n same_length_eval: bool = True, norm_before_output: bool = False,\n p_drop_layer: float = 0.0, use_last_state: bool = False, same_length: bool = False):\n\n super().__init__()\n\n self.embedding = torch.nn.Embedding(voc_size, embedding_size or state_size)\n torch.nn.init.kaiming_normal_(self.embedding.weight, mode=\"fan_in\", nonlinearity=\"linear\")\n\n self.shared_layers = all([la is layers[0] for la in layers])\n\n if embedding_size is None:\n self.embedding_adapter = lambda x: x\n else:\n self.embedding_adapter = torch.nn.Linear(embedding_size, state_size)\n\n self.dropout = torch.nn.Dropout(dropout)\n self.layers = layers\n self.unique_layers = torch.nn.ModuleList(unique_obejcts(layers))\n self.output_adapter = lambda x: x\n self.n_prev_states = n_prev_states\n self.n_prev_states_test = n_prev_states_test or n_prev_states\n self.same_length_eval = same_length_eval\n self.embedding_scale = math.sqrt(state_size)\n self.p_drop_layer = p_drop_layer\n self.use_last_state = use_last_state\n self.same_length = same_length\n self.iter = 0\n\n self.adaptive = bool(adaptive_cutoffs)\n\n out_proj_size = (embedding_size or state_size) if tied_embedding else state_size\n if self.adaptive:\n self.output = framework.layers.CustomAdaptiveLogSoftmaxWithLoss(\n out_proj_size, voc_size, adaptive_cutoffs, div_value=1,\n tied_to=self.embedding if tied_embedding else None)\n else:\n self.output = torch.nn.Linear(out_proj_size, voc_size)\n\n if norm_before_output:\n self.out_norm = torch.nn.LayerNorm(state_size)\n else:\n self.out_norm = lambda x: x\n\n if tied_embedding:\n if not self.adaptive:\n self.output.weight = self.embedding.weight\n if embedding_size is not None:\n self.output_adapter = torch.nn.Linear(state_size, embedding_size)\n\n @staticmethod\n def generate_history_mask(sz: int, device: torch.device) -> torch.Tensor:\n return torch.tril(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=-1)\n\n def gen_output(self, x: torch.Tensor, target: Optional[torch.Tensor]) -> torch.Tensor:\n net = self.out_norm(x)\n net = self.output_adapter(net)\n net = self.dropout(net)\n\n if self.adaptive:\n net = self.output(net.transpose(0, 1), target)\n else:\n net = self.output(net.transpose(0, 1))\n\n return net\n\n def forward(self, x: torch.Tensor, target: Optional[torch.Tensor], state) -> Tuple[torch.Tensor, Any]:\n causality_mask = Transformer.generate_square_subsequent_mask(x.shape[0], x.device)\n\n net = self.dropout(self.embedding(x.T.long()))\n net = self.embedding_adapter(net)\n net = net * self.embedding_scale\n\n new_state = []\n features = [net]\n\n n_prev_states = self.n_prev_states if self.training else self.n_prev_states_test\n\n same_length = self.same_length or ((not self.training) and self.same_length_eval)\n if same_length and state is not None:\n causality_mask = [self.generate_history_mask(x.shape[0], x.device)] + \\\n [torch.zeros_like(causality_mask)] * (len(state[0]) - 1) + [causality_mask]\n causality_mask = torch.cat(causality_mask, -1)\n\n\n plot_cossim = (self.iter % 100 == 0 and self.training)\n for li, l in enumerate(self.layers):\n if n_prev_states > 0:\n 
if li == 0:\n # Pos offset should be constant for all layers\n pos_offset = sum(s.shape[1] for s in state[0]) if state is not None else 0\n\n # Concatenate the new state with the previous states\n li_r = -1 if self.use_last_state else li\n s = (state[li_r] + [net]) if state is not None else [net]\n attend_to = torch.cat(s, 1)\n\n if not self.use_last_state:\n s[-1] = s[-1].detach()\n new_state.append(s[-n_prev_states:])\n else:\n pos_offset = None\n attend_to = None\n\n net_o = l(net, mask=AttentionMask(None, causality_mask), attend_to=attend_to,\n pos_offset=pos_offset)\n\n if plot_cossim:\n features.append(net_o)\n\n with torch.no_grad():\n ndiff = torch.norm(net_o - net, p=2, dim=-1)\n n_in = torch.norm(net, p=2, dim=-1)\n self.log(f\"activation_norm/abs_update_layer_{li}\", ndiff.mean())\n self.log(f\"activation_norm/in_layer_{li}\", n_in.mean())\n self.log(f\"activation_norm/rel_update_layer_{li}\", (ndiff/n_in.clamp(min=torch.finfo(n_in.dtype).eps)).mean())\n\n if self.training and self.p_drop_layer > 0.0:\n net = torch.where(torch.rand_like(net_o[..., 0:1]) < self.p_drop_layer, net, net_o)\n else:\n net = net_o\n\n if self.use_last_state and n_prev_states > 0:\n # If we carry over the last state, save it here\n new_state = [((state[0] if state is not None else []) + [net.detach()])[-n_prev_states:]]\n\n if plot_cossim:\n with torch.no_grad():\n f_sample = [f.view(-1, f.shape[-1])[:1024] for f in features]\n f_sample_all = torch.stack(f_sample, -2)\n scores = framework.utils.cossim(f_sample_all, f_sample_all).mean(0)\n self.log(\"feature_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n outs = F.softmax(self.gen_output(f_sample_all, target).transpose(0, 1), -1)\n scores = framework.utils.cossim(outs, outs).mean(0)\n self.log(\"out_dist_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n real_out = outs[:, -1]\n for i in range(outs.shape[-2] - 1):\n self.log(f\"out_diff_{i}\", (outs[:, i] - real_out).norm(dim=-1, p=1).mean())\n\n del outs\n\n\n del features\n\n net = self.gen_output(net, target)\n self.iter += 1\n\n return net, new_state" }, { "identifier": "task", "path": "tasks/task_db.py", "snippet": "def task(name: Optional[str] = None):\n def wrapper(cls):\n n = TASK_PREFIX + (name or camel_to_snake(cls.__name__))\n assert n not in TASKS, f\"Task {n} already exists\"\n TASKS[n] = cls\n return cls\n return wrapper" }, { "identifier": "args", "path": "tasks/task_db.py", "snippet": "def args(fn):\n global ARGS_REGISTERS\n ARGS_REGISTERS.append(fn)\n return fn" }, { "identifier": "RelativeTransformerEncoderLayer", "path": "layers/transformer/relative_transformer.py", "snippet": "class RelativeTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0, test_pos_clamp: Optional[int] = None, drop_expand: bool = True,\n head_projection_size: Optional[int] = None, ln_after_attention: bool = True):\n super().__init__()\n self.ln_after_attention = ln_after_attention\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model)\n\n if ln_after_attention:\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = 
torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.reset_parameters()\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.self_attn(src, attend_to if attend_to is not None else src, mask, pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n src = self.norm1(src) if self.ln_after_attention else src\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n def reset_parameters(self):\n torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)" }, { "identifier": "PrelnRelativeTransformerEncoderLayer", "path": "layers/transformer/relative_preln_transformer.py", "snippet": "class PrelnRelativeTransformerEncoderLayer(RelativeTransformerEncoderLayer):\n is_preln = True\n\n def __init__(self, d_model, nhead, n_layers: int, dim_feedforward=2048, dropout=0.1,\n activation: ActivationFunction = F.relu, attention_dropout=0, test_pos_clamp: Optional[int] = None,\n drop_expand: bool = True, head_projection_size: Optional[int] = None):\n super().__init__(\n d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward, dropout=dropout,\n activation=activation, attention_dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n drop_expand=drop_expand, head_projection_size=head_projection_size)\n\n reset_prenorm_params(self, n_layers)\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src)\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n src2 = self.norm2(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n return src" }, { "identifier": "RelativeMoeTransformerEncoderLayer", "path": "layers/transformer/relative_moe_transformer.py", "snippet": "class RelativeMoeTransformerEncoderLayer(LoggingLayer, torch.nn.Module):\n def __init__(self, d_model, nhead, n_experts: int, expert_size: int, n_layers: int,\n dropout=0.1, activation: ActivationFunction = F.relu, attention_dropout=0,\n test_pos_clamp: Optional[int] = None,\n dropout_mode: str = \"none\", selection_mode: str = \"add\",\n perplexity_reg: float = 0.0,\n n_heads: int = 1, norm_keys: bool = False, perplexity_reg_mode: str=\"step\",\n n_random: int = 0, reg_type: str = \"normal\",\n topk_mode: str = \"full\", head_projection_size: Optional[int] = None,\n activation_after_topk: bool = False,\n drop_parallel: bool = True,\n normalize_expert_sel_init: bool = False, norm_key_init: bool = False, norm_value_init: bool = False,\n identical_init: bool = False,\n sel_norm: str = \"none\",\n preln: bool = True, ln_affine: bool = True,\n moe_dropout_factor: float = 1.0,\n drop_expert: float = 0.0, sync_distributed: bool = True,\n modulation_amplitude: float = 0.5, moe_init_scale: float = 1.0,\n moe_att_n_experts: int = 4, moe_att_expert_dropout: Optional[float] = None,\n moe_att_selection_mode: str = \"sigmoid\",\n moe_att_k: Optional[int] = None, moe_att_ppl_reg: Optional[float] = None,\n q_expert: 
bool = True, k_expert: bool = True, v_expert: bool = True,\n o_expert: bool = True,\n v_projection_size: Optional[int] = None,\n qside_n_experts: Optional[int] = None,\n moe_attention: bool = False, moe_att_variant: str = \"full\",\n moe_att_shared_experts: bool = False,\n moe_att_kq_n_experts: Optional[int] = None, moe_att_separate_kq_sel: bool = False,\n moe_att_norm_init: bool = False, moe_att_same_sel: bool = False, moe_att_norm_retrieval: bool = False,\n rotate_fraction: float = 0.5, rope_base: float = 10000):\n super().__init__()\n self.preln = preln\n self.i = 0\n\n if moe_attention:\n if moe_att_variant == \"full\":\n self.self_attn = FullMoeRelativeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts,\n perplexity_reg=perplexity_reg if moe_att_ppl_reg is None else moe_att_ppl_reg,\n expert_dropout=drop_expert if moe_att_expert_dropout is None else moe_att_expert_dropout,\n selection_mode=moe_att_selection_mode, q_expert=q_expert, k_expert=k_expert, v_expert=v_expert,\n moe_k=n_heads if moe_att_k is None else moe_att_k, o_expert=o_expert, qside_n_experts=qside_n_experts,\n v_projection_size=v_projection_size, shared_experts=moe_att_shared_experts,\n kq_n_experts=moe_att_kq_n_experts, separate_kq_sel=moe_att_separate_kq_sel,\n normalize_init=moe_att_norm_init,\n same_sel=moe_att_same_sel, normalize_retrieval=moe_att_norm_retrieval,\n )\n elif moe_att_variant == \"full_rope\":\n self.self_attn = FullMoeRopeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts,\n perplexity_reg=perplexity_reg if moe_att_ppl_reg is None else moe_att_ppl_reg,\n expert_dropout=drop_expert if moe_att_expert_dropout is None else moe_att_expert_dropout,\n selection_mode=moe_att_selection_mode, q_expert=q_expert, k_expert=k_expert, v_expert=v_expert,\n moe_k=n_heads if moe_att_k is None else moe_att_k, o_expert=o_expert, qside_n_experts=qside_n_experts,\n v_projection_size=v_projection_size, shared_experts=moe_att_shared_experts,\n kq_n_experts=moe_att_kq_n_experts, separate_kq_sel=moe_att_separate_kq_sel,\n normalize_init=moe_att_norm_init, normalize_retrieval=moe_att_norm_retrieval,\n rotate_fraction=rotate_fraction, rope_base=rope_base,\n )\n else:\n raise ValueError(f\"Unknown attention variant {moe_att_variant}\")\n else:\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n\n std_scale = math.sqrt(2.0 / n_layers) if preln else 1.0\n std_scale *= math.sqrt(moe_init_scale)\n\n self.pkm = MoE(\n d_model, n_experts, expert_size, dropout=dropout * moe_dropout_factor, dropout_mode=dropout_mode,\n weight_scale=std_scale, selection_mode=selection_mode,\n perplexity_reg=perplexity_reg, n_heads=n_heads,\n norm_keys=norm_keys, perplexity_reg_mode=perplexity_reg_mode, n_random=n_random,\n reg_type=reg_type, topk_mode=topk_mode,\n activation_after_topk=activation_after_topk,\n activation=activation,\n normalize_expert_sel_init=normalize_expert_sel_init, norm_key_init=norm_key_init,\n norm_value_init=norm_value_init, identical_init=identical_init,\n sel_norm=sel_norm,\n expert_dropout=drop_expert,\n sync_distributed=sync_distributed,\n modulation_amplitude=modulation_amplitude)\n\n self.norm1 = torch.nn.LayerNorm(d_model, 
elementwise_affine=ln_affine)\n self.norm2 = torch.nn.LayerNorm(d_model, elementwise_affine=ln_affine)\n self.dropout = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.drop_parallel = drop_parallel\n\n if preln:\n reset_prenorm_params(self, n_layers)\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n\n src2 = self.norm1(src) if self.preln else src\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n src = src + self.dropout(src2)\n\n if self.preln:\n src2 = self.norm2(src)\n else:\n src = src2 = self.norm1(src)\n\n src3 = self.pkm(src2)\n\n src = src + self.dropout(src3)\n if not self.preln:\n src = self.norm2(src)\n return src" }, { "identifier": "FastRopeTransformerEncoderLayer", "path": "layers/transformer/fast_rope_transformer.py", "snippet": "class FastRopeTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0, drop_expand: bool = True,\n head_projection_size: Optional[int] = None, preln: bool = False, n_layers: Optional[int] = None,\n rotate_fraction: float = 0.5, rope_base: float = 10000):\n super().__init__()\n self.preln = preln\n self.self_attn = FastRopeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, rotate_fraction=rotate_fraction,\n rope_base=rope_base)\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n\n if preln:\n if n_layers is None:\n raise ValueError(\"n_layers must be specified when using preln\")\n reset_prenorm_params(self, n_layers)\n else:\n self.reset_parameters()\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src) if self.preln else src\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask, pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n\n if self.preln:\n src2 = self.norm2(src)\n else:\n src2 = src = self.norm1(src)\n\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n\n if not self.preln:\n src = self.norm2(src)\n return src\n\n def reset_parameters(self):\n torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)" }, { "identifier": "MoeAttentionRelativeTransformerEncoderLayer", "path": "layers/transformer/moe_attention_relative_transformer.py", "snippet": "class MoeAttentionRelativeTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, moe_att_n_experts, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0, drop_expand: bool = True,\n head_projection_size: Optional[int] = None, preln: bool = False, n_layers: Optional[int] = None,\n att_perplexity_reg: float = 0.0, expert_dropout: float = 0.0, 
att_selection_mode=\"sigmoid\",\n attention_variant=\"moa\", q_expert: bool = True, k_expert: bool = True, v_expert: bool = True,\n o_expert: bool = True, moe_k: int = 2,\n norm_qk_score: bool = False, v_projection_size: Optional[int] = None, same_sel: bool = False,\n qside_n_experts: Optional[int] = None, shared_experts: bool = False,\n kq_n_experts: Optional[int] = None, separate_kq_sel: bool = False,\n cvloss: float = 0.0, switchloss: float = 0.0, zloss: float = 0.0,\n moa_mode: str = \"my\", rotate_fraction: float = 0.5, rope_base: float = 10000,\n moeatt_norm_init: bool = False):\n super().__init__()\n self.is_preln = preln\n if attention_variant not in {\"full\", \"full_rope\"} and (not q_expert):\n raise ValueError(\"q_expert can be disabled only when using qside attention\")\n\n if attention_variant == \"moa\":\n self.self_attn = MoA(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts, perplexity_reg=att_perplexity_reg, expert_dropout=expert_dropout,\n selection_mode=att_selection_mode, mode=moa_mode, cvloss=cvloss, switchloss=switchloss, zloss=zloss\n )\n elif attention_variant == \"full\":\n self.self_attn = FullMoeRelativeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts, perplexity_reg=att_perplexity_reg, expert_dropout=expert_dropout,\n selection_mode=att_selection_mode, q_expert=q_expert, k_expert=k_expert, v_expert=v_expert,\n norm_qk_score=norm_qk_score, v_projection_size=v_projection_size, same_sel=same_sel,\n o_expert=o_expert, moe_k=moe_k, qside_n_experts=qside_n_experts,\n shared_experts=shared_experts, kq_n_experts=kq_n_experts, separate_kq_sel=separate_kq_sel,\n normalize_init=moeatt_norm_init\n )\n elif attention_variant == \"full_rope\":\n self.self_attn = FullMoeRopeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts, perplexity_reg=att_perplexity_reg, expert_dropout=expert_dropout,\n selection_mode=att_selection_mode, q_expert=q_expert, k_expert=k_expert, v_expert=v_expert,\n norm_qk_score=norm_qk_score, v_projection_size=v_projection_size, same_sel=same_sel,\n o_expert=o_expert, moe_k=moe_k, qside_n_experts=qside_n_experts,\n shared_experts=shared_experts, kq_n_experts=kq_n_experts, separate_kq_sel=separate_kq_sel,\n rotate_fraction=rotate_fraction, rope_base=rope_base,\n normalize_init=moeatt_norm_init\n )\n else:\n raise ValueError(f\"Unknown attention variant: {attention_variant}\")\n\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n\n if preln:\n if n_layers is None:\n raise ValueError(\"n_layers must be specified when using preln\")\n reset_prenorm_params(self, n_layers)\n else:\n self.reset_parameters()\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src) if self.is_preln else src\n 
src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask, pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n\n if self.is_preln:\n src2 = self.norm2(src)\n else:\n src2 = src = self.norm1(src)\n\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n\n if not self.is_preln:\n src = self.norm2(src)\n return src\n\n def reset_parameters(self):\n torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)" }, { "identifier": "MoE", "path": "layers/moe_layer.py", "snippet": "class MoE(LoggingLayer, RegularizedLayer, OncePerIterLayer, torch.nn.Module):\n def __init__(self, dmodel: int, n_experts: int, expert_size: int, n_heads: int,\n dropout: float = 0, weight_scale: float = 1.0,\n dropout_mode: str = \"none\", selection_mode: str = \"sigmoid\", perplexity_reg: float = 0.0,\n norm_keys: bool = False,\n perplexity_reg_mode: str=\"step\", n_random: int = 0, reg_type: str = \"entropy\",\n topk_mode: str = \"full\", activation_after_topk: bool = False,\n activation = lambda x: F.relu(x, inplace=True),\n normalize_expert_sel_init: bool = False, norm_key_init: bool = False, norm_value_init: bool = False,\n identical_init: bool = False,\n rescale_normed: bool = False, sel_norm: str = \"none\",\n v_dim: Optional[int] = None,\n expert_dropout: float = 0.0,\n sync_distributed: bool = False,\n modulation_amplitude: float = 0.5,\n ppl_past_blocks: int = 0):\n\n super().__init__()\n self.k_dim = dmodel\n self.v_dim = v_dim if v_dim is not None else dmodel\n self.n_experts = n_experts\n self.expert_size = expert_size\n self.size = self.n_experts * self.expert_size\n self.dropout = dropout\n self.dropout_mode = dropout_mode\n self.selection_mode = selection_mode\n self.perplexity_reg = perplexity_reg\n self.k_vec_dim = self.k_dim\n self.n_heads = n_heads\n self.norm_keys = norm_keys\n self.perplexity_reg_mode = perplexity_reg_mode\n self.n_random = n_random\n self.reg_type = reg_type\n self.topk_mode = topk_mode\n self.activation_after_topk = activation_after_topk\n self.activation = activation\n self.weight_scale = weight_scale\n self.normalize_expert_sel_init = normalize_expert_sel_init\n self.norm_key_init = norm_key_init\n self.norm_value_init = norm_value_init\n self.identical_init = identical_init\n self.layer = 0\n self.initalized = False\n self.rescale_normed = rescale_normed\n self.sel_norm = sel_norm\n self.was_training = True\n self.expert_dropout = expert_dropout\n self.reg_counts = 0\n self.sync_distributed = sync_distributed and torch.distributed.is_initialized()\n self.modulation_amplitude = modulation_amplitude\n self.record_all_expert_sel_counts = False\n self.ppl_past_blocks = ppl_past_blocks\n self.blocks_for_ppl = []\n self.recorded_inputs = []\n\n self.coocurence = None\n\n assert self.selection_mode in {\"gate\", \"sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"sinkhorn_local\", \"mul\", \"sinkmoid2\", \"sinkmax2\"}\n assert self.perplexity_reg_mode in {\"step\", \"global\", \"time\", \"global_time\"}\n assert self.dropout_mode in {\"none\", \"score\"}\n assert self.reg_type in {\"perplexity\", \"variance\", \"entropy\", \"l2\", \"switch\"}\n assert self.topk_mode in {\"full\", \"l1_approx\", \"approx\"}\n assert self.sel_norm in {\"none\", \"cos\", \"input\", \"weights\"}\n\n self.register_buffer(\"iter\", torch.tensor(0, dtype=torch.int64), 
persistent=False)\n\n if selection_mode in {\"mul\"} and activation_after_topk:\n raise ValueError(\"Activation after topk is not supported with mul selection\")\n\n self.keys = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim, self.expert_size))\n\n self.values = torch.nn.Parameter(torch.empty(self.n_experts, self.expert_size, self.v_dim))\n\n self.expert_sel = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim))\n self.sel = lambda x: F.linear(x, self.expert_sel)\n\n torch.nn.init.normal_(self.expert_sel, std=self.k_vec_dim ** -0.5 * weight_scale)\n torch.nn.init.normal_(self.keys, std=dmodel ** -0.5 * weight_scale)\n torch.nn.init.normal_(self.values, std=self.size ** -0.5 * weight_scale)\n self.sel_hist = []\n self.index_sel_counts = 0\n self.index_sel_norm = 0\n\n self.index_sel_counts_100 = 0\n self.index_sel_norm_100 = 0\n\n self.sel_count_log = None\n\n self.all_expert_sel_counts = []\n self.all_expert_sel_soft = []\n\n self.register_buffer(\"kv_sel_counts\", torch.zeros(self.n_experts, self.expert_size), persistent=False)\n self.register_buffer(\"kv_sel_counts_100\", torch.zeros_like(self.kv_sel_counts))\n\n if self.rescale_normed and self.sel_norm != \"none\":\n self.sel_scale = torch.nn.Parameter(torch.ones([1]))\n else:\n self.sel_scale = 1.0\n\n self.register_buffer(\"seq\", torch.arange(max(self.n_heads, self.n_experts, self.k_dim, self.v_dim), dtype=torch.long), persistent=False)\n self.regroup_weights()\n\n if self.ppl_past_blocks > 0 and self.reg_type not in {\"perplexity\", \"entropy\"}:\n print(f\"Warning: ppl_past_blocks>0 (currently {self.ppl_past_blocks}) is only supported with perplexity and entropy regularization\")\n\n def keys_to_logical_order(self, keys: torch.Tensor) -> torch.Tensor:\n k = keys.view(self.n_experts, self.k_vec_dim, self.expert_size)\n return k.permute(0, 2, 1).contiguous().view(-1, self.k_vec_dim)\n\n def keys_from_logical_order(self, keys: torch.Tensor) -> torch.Tensor:\n return keys.view(self.n_experts, self.expert_size, self.k_vec_dim).permute(0, 2, 1).contiguous().view(self.n_experts * self.k_vec_dim, self.expert_size)\n\n def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):\n with torch.no_grad():\n std = weight.std()\n weight.div_(weight.norm(dim=dim, keepdim=True))\n weight.mul_(std / weight.std())\n\n def regroup_weights(self) -> Optional[torch.Tensor]:\n with torch.no_grad():\n if self.norm_key_init:\n self.renorm_keep_std(self.keys.view(self.n_experts, self.k_vec_dim, self.expert_size), dim=1)\n\n if self.norm_value_init:\n self.renorm_keep_std(self.values, dim=1)\n\n if self.identical_init:\n k = self.keys.view(self.n_experts, self.k_vec_dim, self.expert_size)\n self.keys.set_(k[:1].expand_as(k).reshape_as(self.keys))\n\n v = self.values.view(self.n_experts, self.expert_size, self.v_dim)\n self.values.set_(v[:1].expand_as(v).reshape_as(self.values))\n\n if self.normalize_expert_sel_init:\n self.renorm_keep_std(self.expert_sel, dim=1)\n\n def ani(self, x: torch.Tensor) -> torch.Tensor:\n assert x.ndim == 2\n chunk_size = 32\n\n xnorm = F.normalize(x, 2, dim=-1)\n\n accu = 0\n for i in range(0, x.shape[0], chunk_size):\n a = xnorm[i: i + chunk_size]\n sims = xnorm @ a.T\n sims[i : i + chunk_size].fill_diagonal_(0)\n accu += sims.sum()\n\n return accu / (x.shape[0] * (x.shape[0] - 1))\n\n def log_expert_sel_usage(self, prefix: str, channel_sel_counts: torch.Tensor):\n sel_nonzero = (channel_sel_counts != 0).type(torch.float).sum(axis=-1) / self.expert_size\n self.log(f\"{prefix}/mean\", 
sel_nonzero.mean())\n self.log(f\"{prefix}/min\", sel_nonzero.min())\n self.log(f\"{prefix}/max\", sel_nonzero.max())\n\n\n def pre_train_forward(self):\n if self.norm_keys:\n with torch.no_grad():\n self.keys.div_(self.keys.norm(dim=-1, keepdim=True))\n\n if self.training and not self.was_training:\n sorted_counts = self.index_sel_counts.sort(descending=True).values\n self.log(\"test_exert_channel_usage\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n self.layer = 0\n if self.sel_hist:\n self.sel_hist = []\n self.index_sel_counts = 0\n self.index_sel_norm = 0\n self.reg_counts = 0\n\n def before_loss(self):\n if self.sel_hist:\n # Concatenate against time dimension. Important for the within-batch regularization\n sel = torch.cat(self.sel_hist, -2)\n self.add_perplexity_reg(sel)\n\n self.sel_hist = []\n\n if self.index_sel_norm > 0:\n if self.training:\n with torch.no_grad():\n self.log(\"usag_rel_perplexity_all_layers\", utils.relative_perplexity(self.index_sel_counts / self.index_sel_norm))\n self.log(\"dead_expert_proportion_all_layers\", (self.index_sel_counts == 0).float().sum() / self.n_experts)\n\n self.log_expert_sel_usage(\"exert_channel_usage\", self.kv_sel_counts)\n\n self.kv_sel_counts_100.add_(self.kv_sel_counts)\n self.kv_sel_counts.zero_()\n\n self.index_sel_counts_100 = self.index_sel_counts_100 + self.index_sel_counts\n self.index_sel_norm_100 = self.index_sel_norm_100 + self.index_sel_norm\n\n if self.training and self.iter % 100 == 0:\n norm_cnt = self.index_sel_counts_100 / self.index_sel_norm_100\n self.log(\"usag_rel_perplexity_100\", utils.relative_perplexity(norm_cnt))\n self.log(\"dead_expert_proportion_100\", (self.index_sel_counts_100 == 0).float().sum() / self.n_experts)\n\n sorted_counts = self.index_sel_counts_100.sort(descending=True).values\n self.log(\"usage_counts_100\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n\n self.log_expert_sel_usage(\"exert_channel_usage_100\", self.kv_sel_counts_100)\n self.kv_sel_counts_100.zero_()\n\n self.index_sel_counts_100 = 0\n self.index_sel_norm_100 = 0\n\n self.log(\"ani/keys\", self.ani(self.keys_to_logical_order(self.keys)))\n self.log(\"ani/values\", self.ani(self.values.flatten(0, -2)))\n self.log(\"ani/expert_sel\", self.ani(self.expert_sel.T))\n\n if self.training:\n self.iter += 1\n\n def topk(self, x: torch.Tensor, k: int, approx: bool) -> Tuple[torch.Tensor, torch.Tensor]:\n if approx:\n x = x.view(*x.shape[:-1], k, -1)\n scores, ind = x.max(-1)\n return scores, self.seq[:k] * x.shape[-1] + ind\n else:\n return x.topk(k, dim=-1, sorted=False)\n\n def rolling_logsumexp(self, x: torch.Tensor) -> torch.Tensor:\n # Simulate calculating logsumexp over a bigger batch than the current one. 
Will have stale values, but that\n # should not matter much later in training.\n if self.ppl_past_blocks == 0 or not self.training:\n return F.log_softmax(x, dim=-1)\n else:\n if len(self.blocks_for_ppl) == self.ppl_past_blocks:\n self.blocks_for_ppl.pop(0)\n\n self.blocks_for_ppl.append(x)\n res = F.log_softmax(torch.cat(self.blocks_for_ppl, dim=0), dim=-1)\n self.blocks_for_ppl[-1] = self.blocks_for_ppl[-1].detach()\n return res\n\n def add_perplexity_reg(self, sel: torch.Tensor):\n sync_distributed = self.sync_distributed and (self.perplexity_reg_mode not in {\"time\", \"global_time\"})\n\n if self.perplexity_reg_mode in {\"time\", \"global_time\"}:\n sel = sel.flatten(0, -3)\n else:\n sel = sel.flatten(0, -2)\n\n # Note: sel are raw logits, no matter what activation is used\n if self.perplexity_reg > 0:\n if self.reg_type == \"perplexity\":\n sel_d = self.rolling_logsumexp(sel)\n sel_d = framework.utils.distributed_ops.log_mean(sel_d, -2, self.sync_distributed)\n loss = lambda: self.perplexity_reg * ( - utils.relative_perplexity_l(sel_d).mean())\n elif self.reg_type == \"entropy\":\n sel_d = self.rolling_logsumexp(sel)\n sel_d = framework.utils.distributed_ops.log_mean(sel_d, -2, self.sync_distributed)\n loss = lambda: self.perplexity_reg * ( - utils.entropy_l(sel_d).mean())\n elif self.reg_type == \"variance\":\n if sync_distributed:\n raise NotImplementedError(\"Variance regularization is not supported in distributed mode\")\n avg_sel = sel.mean(-2)\n loss = lambda: self.perplexity_reg * avg_sel.var(-1).mean()\n elif self.reg_type == \"l2\":\n loss = lambda: self.perplexity_reg * sel.pow(2).mean()\n elif self.reg_type == \"switch\":\n if sync_distributed:\n torch.distributed.all_reduce(self.reg_counts, op=torch.distributed.ReduceOp.SUM)\n\n p_sel_real = self.reg_counts / self.reg_counts.sum(-1, keepdims=True)\n if self.perplexity_reg_mode in {\"time\", \"global_time\"}:\n p_sel_real = p_sel_real.unsqueeze(-2)\n\n loss = lambda: self.perplexity_reg * (F.softmax(sel, dim=-1) * p_sel_real).mean()\n self.reg_counts = 0\n else:\n assert False\n\n self.add_reg(loss, \"moe\")\n\n def compute_scores(self, input: torch.Tensor, index: CVMMSel, expert_scores: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n scores = cvmm(input, index, self.keys)\n\n if self.selection_mode in {\"mul\"}:\n scores = scores * expert_scores[..., None]\n elif self.selection_mode in {\"gate\", \"sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"sinkmoid2\"}:\n # Handle it later\n pass\n\n scores = self.activation(scores)\n\n plot_training = self.train and self.iter % 10 == 0\n if plot_training:\n with torch.no_grad():\n gt0 = (scores > 0).float()\n gt0_s = gt0.sum()\n\n if plot_training:\n self.log(\"relu_pass_rate\", gt0_s / scores.numel())\n\n self.kv_sel_counts.index_add_(0, index.raw_sel.flatten(), gt0.flatten(end_dim=-2))\n\n if self.dropout > 0 and self.dropout_mode != \"none\":\n scores = F.dropout(scores, self.dropout, training=self.training)\n\n return scores\n\n def sel_activation(self, sel: torch.Tensor, seq_len: int) -> Tuple[torch.Tensor, torch.Tensor]:\n reg_sel = sel\n if self.selection_mode in {\"sigmoid\"}:\n sel = torch.sigmoid(sel)\n elif self.selection_mode in {\"mul\"}:\n sel = sel.abs()\n reg_sel = sel\n elif self.selection_mode in {\"gate\"}:\n sel = F.softmax(sel, dim=-1)\n with torch.no_grad():\n self.log(\"expert_rel_perplexity_per_selection\", utils.relative_perplexity(sel).mean())\n else:\n assert False\n\n return sel, reg_sel\n\n def forward(self, input: 
torch.Tensor) -> torch.Tensor:\n out = 0\n\n in1 = in2 = input\n\n sel = self.sel(in1)\n # sel=sel.float()\n\n if self.sel_norm == \"cos\":\n sel = sel / (in1.norm(dim=-1, keepdim=True) * self.expert_sel.norm(dim=-1)[None]) * self.sel_scale\n elif self.sel_norm == \"weights\":\n sel = sel * (self.sel_scale / self.expert_sel.norm(dim=-1)[None])\n elif self.sel_norm == \"input\":\n sel = sel * (self.sel_scale / in1.norm(dim=-1, keepdim=True))\n\n sel_raw = reg_sel = sel\n\n inv_val = float(\"-inf\")\n\n if not self.activation_after_topk:\n # Sinkhorn should be always applied before top-k\n sel, reg_sel = self.sel_activation(sel, input.shape[-2])\n inv_val = 0\n\n if self.training and self.expert_dropout > 0:\n if self.selection_mode not in {\"sigmoid\", \"gate\"}:\n raise ValueError(\"Expert dropout not supported in this mode\")\n\n mask = torch.rand_like(sel) < self.expert_dropout\n sel2 = sel.masked_fill(mask, inv_val)\n else:\n sel2 = sel\n\n sel_val, sel_index = self.topk(sel2, self.n_heads, self.topk_mode in {\"l1_approx\", \"approx\"})\n\n if self.activation_after_topk or (self.selection_mode in {\"mul\"}):\n sel_val = torch.gather(sel_raw, -1, sel_index)\n sel_val, reg_sel = self.sel_activation(sel_val, input.shape[-2])\n\n\n record_counts_now = (self.training and self.iter % 10 == 0) or (not self.training) or (self.record_all_expert_sel_counts)\n\n if not self.training:\n sel_index_flat = sel_index.flatten(end_dim=-2)\n if self.coocurence is None:\n self.coocurence = torch.zeros([self.n_experts, self.n_experts], device=sel_index_flat.device, dtype=torch.long)\n\n for h1 in range(self.n_heads):\n for h2 in range(self.n_heads):\n ind_flat = sel_index_flat[..., h1] * self.n_experts + sel_index_flat[..., h2]\n values = torch.tensor([1], device=self.coocurence.device, dtype=self.coocurence.dtype).expand_as(ind_flat)\n # values = sel_val[..., h2].flatten()\n self.coocurence.flatten().put_(ind_flat, values, accumulate=True)\n # self.coocurence[sel_index_flat[..., h1], sel_index_flat[..., h2]] += 1\n\n if record_counts_now or self.reg_type == \"switch\":\n reg_counts = F.one_hot(sel_index, self.n_experts).type_as(input)\n\n if self.reg_type == \"switch\":\n reg_counts2 = reg_counts.view(*input.shape[:-2], input.shape[-2] * self.n_heads, self.n_experts)\n if self.perplexity_reg_mode == \"time\":\n reg_counts2 = reg_counts2.sum(-2)\n else:\n reg_counts2 = reg_counts2.flatten(end_dim=-2).sum(0)\n\n self.reg_counts = self.reg_counts + reg_counts2\n\n if record_counts_now:\n with torch.no_grad():\n sel_counts = reg_counts.flatten(end_dim=-2).sum(0)\n cnt = sel_index.nelement()\n\n p_expert_sel = sel_counts / cnt\n\n self.index_sel_counts = self.index_sel_counts + sel_counts\n self.index_sel_norm = self.index_sel_norm + cnt\n\n if self.record_all_expert_sel_counts:\n softcnt = torch.zeros_like(sel_counts, dtype=sel_val.dtype)\n softcnt.index_add_(0, sel_index.flatten(), sel_val.flatten())\n\n self.all_expert_sel_soft.append(softcnt)\n self.all_expert_sel_counts.append(sel_counts)\n\n if self.training:\n self.log(\"min_sel_score\", sel_val.min(dim=-1).values.mean())\n self.log(\"max_sel_score\", sel_val.max(dim=-1).values.mean())\n\n sel_oh = F.one_hot(sel_index, self.n_experts).sum(-2).bool()\n if self.layer >= 1 and self.training:\n self.log(f\"layer_sel_overlap_{self.layer}\", ((self.prev_sel_oh & sel_oh).sum(-1).float() / self.n_heads).mean())\n\n self.prev_sel_oh = sel_oh\n\n ppl = utils.relative_perplexity(p_expert_sel)\n self.log(\"usage_rel_perplexity\", ppl)\n 
self.log(\"dead_expert_proportion\", (p_expert_sel == 0).float().sum() / self.n_experts)\n\n if self.perplexity_reg_mode in {\"step\", \"time\"}:\n self.add_perplexity_reg(reg_sel)\n elif self.perplexity_reg > 0 and self.training:\n self.sel_hist.append(reg_sel)\n\n sel_indices = cvmm_prepare_sel2(sel_index.int())\n\n scores = self.compute_scores(in2, sel_indices, sel_val)\n\n sel_indices = sel_indices.clone()\n sel_indices.reduction_weight = sel_val\n sel_indices.sel_index = sel_indices.out_index\n sel_indices.out_index = None\n\n if self.selection_mode not in {\"gate\", \"sigmoid\"}:\n sel_indices.reduction_weight = torch.ones_like(sel_indices.reduction_weight)\n\n out = cvmm(scores, sel_indices, self.values)\n\n self.layer += 1\n\n self.was_training = self.training\n res = out.view(*input.shape[:-1], self.v_dim)\n return res\n\n def dump_logs(self, save_dir: str):\n if self.coocurence is not None:\n os.makedirs(save_dir, exist_ok=True)\n torch.save(self.coocurence, os.path.join(save_dir, \"coocurence.pt\"))\n\n def get_logs(self) -> Dict[str, Any]:\n res = super().get_logs()\n\n if self.coocurence is not None:\n coo = self.coocurence / self.coocurence.diagonal().clamp(min=1)[:, None]\n res[\"expert_coocurence\"] = framework.visualize.plot.Heatmap(coo, xlabel=\"expert\", ylabel=\"expert\", textval=False)\n self.coocurence = None\n return res" }, { "identifier": "Result", "path": "interfaces/result.py", "snippet": "class Result:\n outputs: torch.Tensor\n loss: torch.Tensor\n\n batch_dim = 0\n\n def plot(self) -> Dict[str, Any]:\n return {}\n\n @property\n def batch_size(self) -> int:\n return self.outputs.shape[self.batch_dim]\n\n @staticmethod\n def merge(l: List, batch_weights: Optional[List[float]] = None):\n if len(l) == 1:\n return l[0]\n batch_weights = batch_weights if batch_weights is not None else [1] * len(l)\n loss = sum([r.loss * w for r, w in zip(l, batch_weights)]) / sum(batch_weights)\n out = torch.cat([r.outputs for r in l], l[0].batch_dim)\n return l[0].__class__(out, loss)" }, { "identifier": "LayerVisualizer", "path": "layers/layer_with_visualization.py", "snippet": "class LayerVisualizer:\n def __init__(self, module: torch.nn.Module, options: Dict[str, Any] = {}):\n self.modules = []\n self.options = options\n self.curr_options = None\n for n, m in module.named_modules():\n if isinstance(m, LayerWithVisualization):\n self.modules.append((n, m))\n\n def plot(self) -> Dict[str, Any]:\n res = {}\n for n, m in self.modules:\n res.update({f\"{n}/{k}\": v for k, v in m.plot(self.curr_options).items()})\n m.visualization_enabled = False\n\n self.curr_options = None\n return res\n\n def prepare(self, options: Dict[str, Any] = {}):\n self.curr_options = self.options.copy()\n self.curr_options.update(options)\n\n for _, m in self.modules:\n m.prepare()\n m.visualization_enabled = True" }, { "identifier": "FullMoeRelativeAttentionCore", "path": "layers/transformer/full_moe_relative_attention.py", "snippet": "class FullMoeRelativeAttentionCore(LayerWithVisualization, LoggingLayer, RegularizedLayer, OncePerIterLayer, torch.nn.Module):\n def __init__(self, state_size: int, n_heads: int, n_experts: int, dropout: float = 0.0, input_size: Optional[int] = None,\n projection_size: Optional[int] = None, output_size: Optional[int] = None, init_std_scale: float = 1.0,\n perplexity_reg: float = 0, share_pk: bool = True, expert_dropout: float = 0.0,\n selection_mode: str = \"sigmoid\", moe_k: int = 2, q_expert: bool = True,\n k_expert: bool = True, v_expert: bool = True, o_expert: bool = 
True, norm_qk_score: bool = False,\n v_projection_size: Optional[int] = None, same_sel: bool = False,\n qside_n_experts: Optional[int] = None, shared_experts: bool = False,\n kq_n_experts: Optional[int] = None, separate_kq_sel: bool = False,\n normalize_init: bool = False, normalize_retrieval: bool = False):\n\n super().__init__()\n\n self.input_size = input_size or state_size\n self.output_size = output_size or state_size\n self.pe_size = self.input_size\n self.perplexity_reg = perplexity_reg\n self.share_pk = share_pk\n self.expert_dropout = expert_dropout\n self.selection_mode = selection_mode\n self.iter = 0\n self.moe_k = moe_k\n self.norm_qk_score = norm_qk_score\n self.same_sel = same_sel\n self.shared_experts = shared_experts\n self.init_std_scale = init_std_scale\n self.normalize_init = normalize_init\n self.attention_to_visualize = []\n self.selections_to_visualize = {}\n\n self.is_expert = {\n \"k\": k_expert,\n \"q\": q_expert,\n \"v\": v_expert,\n \"o\": o_expert\n }\n self.n_experts = {\n \"k\": kq_n_experts or n_experts,\n \"q\": kq_n_experts or qside_n_experts or n_experts,\n \"v\": n_experts,\n \"o\": qside_n_experts or n_experts\n }\n\n self.separate_k_sel = separate_kq_sel or (self.n_experts[\"k\"] != self.n_experts[\"v\"])\n self.separate_q_sel = separate_kq_sel or (self.n_experts[\"q\"] != self.n_experts[\"o\"])\n\n self.sel_hist = {}\n self.sel_counts_100 = {}\n\n self.n_heads = n_heads\n self.dropout = torch.nn.Dropout(dropout) if dropout > 0 else lambda x: x\n self.projection_size = projection_size or (state_size // n_heads)\n self.v_projection_size = v_projection_size or self.projection_size\n\n self.std_in = init_std_scale * math.sqrt(1 / self.input_size)\n std_out = init_std_scale * math.sqrt(1 / (n_heads * self.v_projection_size))\n\n self.create_selection_logic()\n\n self.src_side_maps = {\"k\", \"v\"}\n\n self.projections = torch.nn.ParameterDict({\n \"q\": self.create_param_block(\"q\", self.input_size, self.projection_size, self.std_in),\n \"k\": self.create_param_block(\"k\", self.input_size, self.projection_size, self.std_in),\n \"v\": self.create_param_block(\"v\", self.input_size, self.v_projection_size, self.std_in),\n \"o\": self.create_param_block(\"o\", self.v_projection_size, self.output_size, std_out),\n })\n\n if normalize_retrieval:\n self.norm_ret = torch.nn.LayerNorm(self.projection_size)\n else:\n self.norm_ret = lambda x: x\n\n self.sel_correlation = 0\n\n self.register_buffer(\"scale\", torch.full([1], 1.0 / math.sqrt(self.projection_size)), persistent=False)\n\n def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):\n with torch.no_grad():\n std = weight.std()\n weight.div_(weight.norm(dim=dim, keepdim=True))\n weight.mul_(std / weight.std())\n\n def get_n_copies(self, name: str):\n return self.n_heads\n\n def create_param_block(self, name: str, in_size: int, out_size: int, std: float):\n n_copies = self.get_n_copies(name)\n\n if self.is_expert[name]:\n exp_mul = 1 if self.shared_experts else n_copies\n p = torch.nn.Parameter(torch.randn(exp_mul * self.n_experts[name], in_size, out_size) * std)\n if self.normalize_init:\n self.renorm_keep_std(p, dim=0)\n return p\n else:\n if name == \"o\":\n in_size = n_copies * in_size\n else:\n out_size = n_copies * out_size\n return torch.nn.Parameter(torch.randn(out_size, in_size) * std)\n\n def create_selection_logic(self):\n sels_params = {}\n self.sel_map = {}\n\n def register_remap(dest: str, src: str) -> bool:\n if not (src in sels_params or src in self.sel_map):\n # src is not defined\n 
return False\n\n assert self.n_experts[src] == self.n_experts[dest]\n self.sel_map[dest] = self.sel_map.get(src, src)\n return True\n\n if self.is_expert[\"o\"]:\n sels_params[\"o\"] = self.init_sel(\"o\", self.std_in)\n\n if self.is_expert[\"q\"] and (self.separate_q_sel or not register_remap(\"q\", \"o\")):\n sels_params[\"q\"] = self.init_sel(\"q\", self.std_in)\n\n if self.is_expert[\"v\"] and ((not self.same_sel) or not register_remap(\"v\", \"o\")):\n sels_params[\"v\"] = self.init_sel(\"v\", self.std_in)\n\n if self.is_expert[\"k\"]:\n if (not (self.same_sel and self.separate_k_sel and register_remap(\"k\", \"q\"))) and (self.separate_k_sel or not register_remap(\"k\", \"v\")):\n sels_params[\"k\"] = self.init_sel(\"k\", self.std_in)\n\n self.selections = torch.nn.ParameterDict(sels_params)\n\n def init_sel(self, name: str, std: float) -> torch.nn.Parameter:\n n_copies = self.get_n_copies(name)\n n_experts = self.n_experts[name]\n sel = torch.nn.Parameter(torch.randn(n_experts*n_copies, self.input_size) * std)\n self.renorm_rows(sel)\n return sel\n\n def renorm_rows(self, x: torch.Tensor):\n with torch.no_grad():\n std_t = x.std(dim=-1, keepdim=True)\n x.div_(x.norm(dim=-1, keepdim=True))\n x.mul_(std_t / x.std())\n\n\n def project_to_torch_order(self, x: torch.Tensor):\n return x.view(*x.shape[:-1], self.get_n_copies(\"k\"), -1).transpose(-2, -3)\n\n def get_mask_tensor(self, src_len: int, mask: Optional[AttentionMask]) -> Optional[torch.Tensor]:\n if mask is None or (mask.position_mask is None and mask.src_length_mask is None):\n return None\n\n # mask.position_mask: [..., N_out, N_in]\n # mask.src_length_mask: [B, ...., N_in]\n # True where it has to be masked\n\n if mask.position_mask is not None:\n n_pad = src_len - mask.position_mask.shape[-1]\n if n_pad > 0:\n pm = F.pad(mask.position_mask, (n_pad, 0), 'constant', value=False)\n else:\n pm = mask.position_mask\n\n if mask.position_mask is None:\n m = mask.src_length_mask.unsqueeze(-2).unsqueeze(-2)\n elif mask.src_length_mask is None:\n m = pm\n else:\n m = mask.src_length_mask.unsqueeze(-2).unsqueeze(-2) | pm\n\n return m\n\n def train(self, mode: bool = True):\n self.sel_hist = {}\n return super().train(mode)\n\n def get_lost_on_hist(self, l: List[torch.Tensor]) -> torch.Tensor:\n assert l[0].ndim == 4\n l = [t.flatten(1,2) for t in l]\n sel = torch.cat(l, -2)\n sel_d = F.log_softmax(sel, dim=-1)\n sel_d = framework.utils.distributed_ops.log_mean(sel_d, -2, sync_distributed=False)\n return self.perplexity_reg * ( - utils.entropy_l(sel_d).mean())\n\n def get_reg_loss(self) -> Dict[str, torch.Tensor]:\n l = super().get_reg_loss()\n for k, v in self.sel_hist.items():\n l[f\"moe_att_entropy/{k}\"] = self.get_lost_on_hist(v)\n\n self.sel_hist = {}\n return l\n\n def get_sel(self, t: torch.Tensor, w: torch.Tensor, name: str) -> Selection:\n n_experts = self.n_experts[name]\n n_copies = self.get_n_copies(name)\n\n sel = F.linear(t, w).float()\n sel = sel.view(*sel.shape[:-1], n_copies, -1)\n with torch.no_grad():\n if self.expert_dropout > 0 and self.training:\n mask = torch.rand_like(sel) < self.expert_dropout\n sel2 = sel.masked_fill(mask, float('-inf'))\n else:\n sel2 = sel\n _, sel_index = sel2.topk(self.moe_k, dim=-1, sorted=False)\n sel_val = torch.gather(sel, -1, sel_index)\n\n if self.selection_mode == \"softmax\":\n sel_val = sel_val.softmax(-1)\n elif self.selection_mode == \"sigmoid\":\n sel_val = sel_val.sigmoid()\n else:\n raise ValueError(\"Unknown selection mode: \" + self.selection_mode)\n\n exp_shift = 0 if 
self.shared_experts else n_experts\n\n sel_index_shifted = (torch.arange(n_copies, device=sel_index.device, dtype=sel_index.dtype) * exp_shift).unsqueeze(-1) + sel_index\n sel_index_pp = cvmm_prepare_sel2(sel_index_shifted.flatten(-2,-1), sel_val)\n\n return Selection(sel, sel_val, sel_index, sel_index_pp)\n\n def before_loss(self):\n self.iter += 1\n if self.iter % 100 == 0:\n for k, v in self.sel_counts_100.items():\n sorted_counts = v.sort(descending=True).values\n self.log(f\"sel_counts/{k}\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n self.sel_counts_100 = {}\n\n def exp_proj(self, x: torch.Tensor, w: torch.Tensor, sel: Selection) -> torch.Tensor:\n return cvmm(x, sel.sel_index, w)\n\n def compute_sel(self, curr_state: torch.Tensor, attend_to: torch.Tensor) -> Dict[str, Selection]:\n self.selection_mode\n outs = {}\n done = {}\n cross_atten = curr_state is not attend_to\n\n for name in (set(self.selections.keys()) | set(self.sel_map.keys())):\n name_actual = self.sel_map.get(name, name)\n\n # There coukd be 2 versions of everything: source side and destination side. Check if they are different,\n # and if not, use the cached version, my_id is the unique identifier for this transformation\n is_src_side = (name in self.src_side_maps) or not cross_atten\n my_id = (name_actual, is_src_side)\n\n cached = done.get(my_id)\n if cached is not None:\n outs[name] = cached\n continue\n\n # No cache, actually compute\n inp = attend_to if is_src_side else curr_state\n v = self.selections[name_actual]\n outs[name] = self.get_sel(inp, v, name)\n\n # Save history for regularization\n if self.perplexity_reg > 0 and self.training:\n if name not in self.sel_hist:\n self.sel_hist[name] = []\n self.sel_hist[name].append(outs[name].raw_sel)\n\n # Visualize statistics\n if self.training and self.iter % 10 == 0:\n self.sel_counts_100[name] = self.sel_counts_100.get(name, 0) + \\\n F.one_hot(outs[name].raw_sel_index.flatten(), self.n_experts[name]).sum(0)\n\n done[my_id] = outs[name]\n\n return outs\n\n def project(self, name: str, src: torch.Tensor, sel: Dict[str, Selection]) -> torch.Tensor:\n if name in sel:\n sv = sel[name]\n if self.norm_qk_score and name in {\"q\", \"k\"}:\n sv.sel_index.reduction_weight = F.normalize(sv.sel_index.reduction_weight, p=1, dim=-1)\n return self.exp_proj(src, self.projections[name], sv)\n else:\n return F.linear(src, self.projections[name])\n\n def attend(self, curr_state: torch.Tensor, attend_to: torch.Tensor, pos_offset: int, v: torch.Tensor,\n k: torch.Tensor, q: torch.Tensor, mask: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n raise NotImplementedError()\n\n def attention_proj(self, att: torch.Tensor, v: torch.Tensor,\n mask: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n if mask is not None:\n att.masked_fill_(mask, float('-inf'))\n\n att = F.softmax(att, dim=-1)\n\n res = att @ v\n return res, att\n\n def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],\n pos_offset: Optional[int] = None, need_weights: bool = False):\n # curr_state: [batch_size, out_len, c]\n # attend_to: [batch_size, in_len, c]\n\n if pos_offset is None:\n assert curr_state.shape[1] == attend_to.shape[1], \"If attend_to has different shape than curr_state, pos_offset should be provided\"\n pos_offset = 0\n\n sel = self.compute_sel(curr_state, attend_to)\n\n # scale q and k with sqrt(scale) before the attention. 
This should save memory, be faster, and\n # keep the range of k and v better. It should make attention NaNs better with float16.\n scale = self.scale.sqrt()\n\n q = self.project(\"q\", curr_state, sel)\n q = q * scale.type_as(q)\n k = self.project(\"k\", attend_to, sel)\n k = k * scale.type_as(k)\n v = self.project(\"v\", attend_to, sel)\n\n q = self.project_to_torch_order(q) if \"q\" not in sel else q.transpose(-2,-3)\n k = self.project_to_torch_order(k) if \"k\" not in sel else k.transpose(-2,-3)\n v = self.project_to_torch_order(v) if \"v\" not in sel else v.transpose(-2,-3)\n\n k = self.dropout(k)\n\n res, att = self.attend(curr_state, attend_to, pos_offset, v, k, q, self.get_mask_tensor(attend_to.shape[-2], mask))\n res = self.norm_ret(res)\n\n if self.visualization_enabled:\n self.attention_to_visualize.append(att[0].detach())\n for k, s in sel.items():\n if k not in self.selections_to_visualize:\n self.selections_to_visualize[k] = []\n\n with torch.no_grad():\n m = torch.zeros([*s.sel_val[0].shape[:-1], self.n_experts[k]], device=s.sel_val.device, dtype=s.sel_val.dtype)\n m.scatter_(-1, s.raw_sel_index[0], s.sel_val[0])\n\n self.selections_to_visualize[k].append(m)\n\n if self.get_n_copies(\"k\") != self.get_n_copies(\"v\"):\n res = res.view(\n *res.shape[:-1], self.get_n_copies(\"v\") // self.get_n_copies(\"k\"), -1).transpose(2,3).flatten(1,2).contiguous()\n\n if self.is_expert[\"o\"]:\n res = res.transpose(-2, -3)\n # The output selection indices are calculated from the current state and are also used for projecting \"q\".\n # But that projection needs to create multiple copies for the different heads. Here we already have the\n # heads, but we have to create copies for the top-k elements. We can calculate that from the reduction\n # weight. We also want to compute not only the weighted average between the top-k elements, but also\n # of the different heads. 
So reshape the reduction weight accordingly.\n o_sel = sel[\"o\"].sel_index.clone()\n o_sel.sel_index = o_sel.out_index // o_sel.reduction_weight.shape[-1]\n o_sel.reduction_weight = o_sel.reduction_weight.flatten(-2)\n out = cvmm(res, o_sel, self.projections[\"o\"])\n else:\n res = res.transpose(-2, -3)\n out = F.linear(res.contiguous().view(*curr_state.shape[:-1], -1), self.projections[\"o\"])\n\n return out\n\n def plot(self, options: Dict[str, Any]) -> Dict[str, Any]:\n r = {}\n marks = options.get(\"steplabel\")\n n_steps = options.get(\"n_steps\") or 9999999\n y_marks = options.get(\"target_labels\", marks)\n\n ns1 = (self.attention_to_visualize[0].shape[-2] + n_steps) if n_steps < 0 else 0\n ns1_e = self.attention_to_visualize[0].shape[-2] if n_steps < 0 else n_steps\n ns2 = (self.attention_to_visualize[0].shape[-1] + n_steps) if n_steps < 0 else 0\n ns2_e = self.attention_to_visualize[0].shape[-1] if n_steps < 0 else n_steps\n\n if marks is not None:\n assert len(marks) == self.attention_to_visualize[0].shape[-1]\n marks = marks[ns2:ns2_e]\n\n if y_marks is not None:\n assert len(y_marks) == self.attention_to_visualize[0].shape[-2]\n y_marks = y_marks[ns1:ns1_e]\n\n if options.get(\"mha.plot_head_details\") and self.attention_to_visualize[0].shape[0] > 1:\n for head in range(self.attention_to_visualize[0].shape[0]):\n sel_map = {k: [e[:, head][ns1:ns1_e] if k in {'q', 'o'} else e[:, head][ns2:ns2_e] for e in v] for k, v in self.selections_to_visualize.items()}\n selections = {k: torch.stack(v, 0).cpu() for k, v in sel_map.items()}\n\n x_selections = {k: v for k, v in selections.items() if k in {'k', 'v'}}\n y_selections = {k: v for k, v in selections.items() if k in {'q', 'o'}}\n\n r[f\"head_{head}\"] = MoEAttentionPlot(\n torch.stack([layer[head][ns1:ns1_e, ns2:ns2_e] for _, layer in enumerate(self.attention_to_visualize)], 0),\n x_selections, y_selections,\n ylabel=\"dest\", xlabel=\"src\", x_marks=marks, y_marks=y_marks)\n\n r[\"attention_max\"] = framework.visualize.plot.AnimatedHeatmap(\n torch.stack([layer.max(0)[0][ns1:ns1_e, ns2:ns2_e] for _, layer in enumerate(self.attention_to_visualize)], 0),\n ylabel=\"dest\", xlabel=\"src\", textval=False, x_marks=marks, y_marks=y_marks, ignore_wrong_marks=True)\n\n self.attention_to_visualize = []\n self.selections_to_visualize = {}\n return r\n\n def dump_logs(self, save_dir: str):\n if torch.is_tensor(self.sel_correlation):\n os.makedirs(save_dir, exist_ok=True)\n torch.save(self.sel_correlation, os.path.join(save_dir, \"sel_correlation.pt\"))\n\n def get_logs(self) -> Dict[str, Any]:\n res = super().get_logs()\n\n if torch.is_tensor(self.sel_correlation):\n coo = self.sel_correlation / self.sel_correlation.flatten(1).sum(-1).clamp(min=1)[:, None, None]\n for h in range(self.n_heads):\n res[f\"expert_coocurence_{h}\"] = framework.visualize.plot.Heatmap(coo[h], xlabel=\"o expert\", ylabel=\"v expert\", textval=False)\n self.sel_correlation = 0\n return res" } ]
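The snippets above all converge on the same routing idea: the MoE layer scores every token against a per-expert selection vector, applies sigmoid (or softmax) gating with optional expert dropout, keeps the top-k experts, and dispatches the token through only those experts' key/value weights. The following is a minimal sketch of that pattern, not the repository's implementation: the dense indexing and einsum dispatch stand in for its cvmm kernels, all regularization and logging are dropped, and the name toy_moe_forward is purely illustrative.

import torch
import torch.nn.functional as F

def toy_moe_forward(x, expert_sel, keys, values, k=2, expert_dropout=0.0, training=False):
    # x: [B, T, d_model], expert_sel: [n_experts, d_model]
    # keys: [n_experts, d_model, expert_size], values: [n_experts, expert_size, d_model]
    logits = F.linear(x, expert_sel)                    # selection logits per token and expert
    gates = torch.sigmoid(logits)                       # "sigmoid" selection mode
    if training and expert_dropout > 0.0:
        dropped = torch.rand_like(gates) < expert_dropout
        gates = gates.masked_fill(dropped, 0.0)         # a zeroed expert loses the top-k to any active one
    gate_val, gate_idx = gates.topk(k, dim=-1)          # route each token to its k best experts
    out = torch.zeros_like(x)
    for slot in range(k):
        idx = gate_idx[..., slot]                       # [B, T] chosen expert id for this slot
        h = F.relu(torch.einsum("btd,btde->bte", x, keys[idx]))
        out = out + gate_val[..., slot:slot + 1] * torch.einsum("bte,bted->btd", h, values[idx])
    return out

# e.g. 16 experts of size 128 on a 64-dim stream:
# y = toy_moe_forward(torch.randn(2, 8, 64), torch.randn(16, 64),
#                     torch.randn(16, 64, 128), torch.randn(16, 128, 64), k=2)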
import framework
import torch
import torch.nn
import torch.nn.functional as F
import torch.utils.data
import math
from typing import List, Tuple, Dict, Any
from models import TransformerLanguageModel
from ... import task, args
from layers.transformer import RelativeTransformerEncoderLayer, PrelnRelativeTransformerEncoderLayer
from layers.transformer.relative_moe_transformer import RelativeMoeTransformerEncoderLayer
from layers.transformer.fast_rope_transformer import FastRopeTransformerEncoderLayer
from layers.transformer.moe_attention_relative_transformer import MoeAttentionRelativeTransformerEncoderLayer
from layers.moe_layer import MoE
from interfaces import Result
from layers import LayerVisualizer
from layers.transformer.full_moe_relative_attention import FullMoeRelativeAttentionCore
19587
parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="sigmoid", choice=["gate", "sigmoid", "mul"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-moe.att.n_experts", default=4) parser.add_argument("-moe.att.variant", default="moa", choice=["moa", "simple", "qside", "full", "full_rope", "seq", "target"]) parser.add_argument("-moe.att.enable", default=False) parser.add_argument("-moe.att.q_expert", default=True) parser.add_argument("-moe.att.k_expert", default=True) parser.add_argument("-moe.att.v_expert", default=True) parser.add_argument("-moe.att.o_expert", default=True) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_qk", default=False) parser.add_argument("-moe.att.v_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.same_sel", default=False) parser.add_argument("-moe.att.expert_dropout", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.selection_mode", default="sigmoid", 
choice=["sigmoid", "softmax"]) parser.add_argument("-moe.att.perplexity_reg", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.qside_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_ret", default=False) parser.add_argument("-moe.att.shared_experts", default=False) parser.add_argument("-moe.att.drop_expert", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.kq_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.separate_kq_sel", default=False) parser.add_argument("-moe.att.norm_init", default=False) parser.add_argument("-rope.rotate_fraction", default=0.5) parser.add_argument("-rope.base", default=10000.0) parser.add_argument("-moa.mode", default="my", choice=["my", "moa"]) parser.add_argument("-moa.cvloss", default=0.0) parser.add_argument("-moa.switchloss", default=0.0) parser.add_argument("-moa.zloss", default=0.0) parser.add_argument("-debug_plot_interval", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.plot_head_details", default=False) parser.add_argument("-plot.n_steps", default=-128) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dropout=self.helper.args.dropout, activation=activation ) if self.helper.args.transformer.variant not in {"preln_moe", "moe"}: base_args["dim_feedforward"]=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_moeatt"}:
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="sigmoid", choice=["gate", "sigmoid", "mul"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-moe.att.n_experts", default=4) parser.add_argument("-moe.att.variant", default="moa", choice=["moa", "simple", "qside", "full", "full_rope", "seq", "target"]) parser.add_argument("-moe.att.enable", default=False) parser.add_argument("-moe.att.q_expert", default=True) parser.add_argument("-moe.att.k_expert", default=True) parser.add_argument("-moe.att.v_expert", default=True) parser.add_argument("-moe.att.o_expert", default=True) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_qk", default=False) 
parser.add_argument("-moe.att.v_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.same_sel", default=False) parser.add_argument("-moe.att.expert_dropout", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.selection_mode", default="sigmoid", choice=["sigmoid", "softmax"]) parser.add_argument("-moe.att.perplexity_reg", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.qside_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_ret", default=False) parser.add_argument("-moe.att.shared_experts", default=False) parser.add_argument("-moe.att.drop_expert", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.kq_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.separate_kq_sel", default=False) parser.add_argument("-moe.att.norm_init", default=False) parser.add_argument("-rope.rotate_fraction", default=0.5) parser.add_argument("-rope.base", default=10000.0) parser.add_argument("-moa.mode", default="my", choice=["my", "moa"]) parser.add_argument("-moa.cvloss", default=0.0) parser.add_argument("-moa.switchloss", default=0.0) parser.add_argument("-moa.zloss", default=0.0) parser.add_argument("-debug_plot_interval", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.plot_head_details", default=False) parser.add_argument("-plot.n_steps", default=-128) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dropout=self.helper.args.dropout, activation=activation ) if self.helper.args.transformer.variant not in {"preln_moe", "moe"}: base_args["dim_feedforward"]=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_moeatt"}:
mklayer = lambda: MoeAttentionRelativeTransformerEncoderLayer(
7
2023-12-13 08:45:02+00:00
24k
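The record that ends here configures its options through a custom `framework.helpers.ArgumentParser`, repeatedly passing `parser=parser.int_or_none_parser` or `parser=parser.float_or_none_parser` together with `default="none"` so a flag can carry either a number or the sentinel string `none` (meaning "unset"). That helper's implementation is not part of the record, so the snippet below is only a minimal stand-alone sketch of the same int-or-none / float-or-none pattern using stdlib `argparse`; the helper names and the `--flag` syntax here are chosen for illustration and may differ from the framework's actual API.

```python
import argparse
from typing import Optional


def int_or_none(value: str) -> Optional[int]:
    # Treat the literal string "none" (case-insensitive) as "unset".
    if value.lower() == "none":
        return None
    return int(value)


def float_or_none(value: str) -> Optional[float]:
    if value.lower() == "none":
        return None
    return float(value)


parser = argparse.ArgumentParser()
# Mirrors options such as -transformer.head_projection_size and
# -moe.att.expert_dropout from the record above, but in stdlib syntax:
# argparse takes a converter via type=, not a parser= keyword.
parser.add_argument("--transformer.head_projection_size", type=int_or_none, default=None)
parser.add_argument("--moe.att.expert_dropout", type=float_or_none, default=None)

args = parser.parse_args(["--transformer.head_projection_size", "64",
                          "--moe.att.expert_dropout", "none"])
print(getattr(args, "transformer.head_projection_size"))  # 64
print(getattr(args, "moe.att.expert_dropout"))            # None
```

In the framework used by the record, `default="none"` combined with such a parser presumably resolves to `None` at runtime, which is why many of the defaults above are the string "none" rather than a number.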
AIFSH/NativeDancer
nativedancer/third_part/detectron2/modeling/roi_heads/roi_heads.py
[ { "identifier": "configurable", "path": "nativedancer/third_part/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\n \"\"\"\n Decorate a function or a class's __init__ method so that it can be called\n with a :class:`CfgNode` object using a :func:`from_config` function that translates\n :class:`CfgNode` to arguments.\n\n Examples:\n ::\n # Usage 1: Decorator on __init__:\n class A:\n @configurable\n def __init__(self, a, b=2, c=3):\n pass\n\n @classmethod\n def from_config(cls, cfg): # 'cfg' must be the first argument\n # Returns kwargs to be passed to __init__\n return {\"a\": cfg.A, \"b\": cfg.B}\n\n a1 = A(a=1, b=2) # regular construction\n a2 = A(cfg) # construct with a cfg\n a3 = A(cfg, b=3, c=4) # construct with extra overwrite\n\n # Usage 2: Decorator on any function. Needs an extra from_config argument:\n @configurable(from_config=lambda cfg: {\"a: cfg.A, \"b\": cfg.B})\n def a_func(a, b=2, c=3):\n pass\n\n a1 = a_func(a=1, b=2) # regular call\n a2 = a_func(cfg) # call with a cfg\n a3 = a_func(cfg, b=3, c=4) # call with extra overwrite\n\n Args:\n init_func (callable): a class's ``__init__`` method in usage 1. The\n class must have a ``from_config`` classmethod which takes `cfg` as\n the first argument.\n from_config (callable): the from_config function in usage 2. It must take `cfg`\n as its first argument.\n \"\"\"\n\n if init_func is not None:\n assert (\n inspect.isfunction(init_func)\n and from_config is None\n and init_func.__name__ == \"__init__\"\n ), \"Incorrect use of @configurable. Check API documentation for examples.\"\n\n @functools.wraps(init_func)\n def wrapped(self, *args, **kwargs):\n try:\n from_config_func = type(self).from_config\n except AttributeError as e:\n raise AttributeError(\n \"Class with @configurable must have a 'from_config' classmethod.\"\n ) from e\n if not inspect.ismethod(from_config_func):\n raise TypeError(\"Class with @configurable must have a 'from_config' classmethod.\")\n\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)\n init_func(self, **explicit_args)\n else:\n init_func(self, *args, **kwargs)\n\n return wrapped\n\n else:\n if from_config is None:\n return configurable # @configurable() is made equivalent to @configurable\n assert inspect.isfunction(\n from_config\n ), \"from_config argument of configurable must be a function!\"\n\n def wrapper(orig_func):\n @functools.wraps(orig_func)\n def wrapped(*args, **kwargs):\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config, *args, **kwargs)\n return orig_func(**explicit_args)\n else:\n return orig_func(*args, **kwargs)\n\n wrapped.from_config = from_config\n return wrapped\n\n return wrapper" }, { "identifier": "ShapeSpec", "path": "nativedancer/third_part/detectron2/layers/shape_spec.py", "snippet": "class ShapeSpec:\n \"\"\"\n A simple structure that contains basic shape specification about a tensor.\n It is often used as the auxiliary inputs/outputs of models,\n to complement the lack of shape inference ability among pytorch modules.\n \"\"\"\n\n channels: Optional[int] = None\n height: Optional[int] = None\n width: Optional[int] = None\n stride: Optional[int] = None" }, { "identifier": "nonzero_tuple", "path": "nativedancer/third_part/detectron2/layers/wrappers.py", "snippet": "def nonzero_tuple(x):\n \"\"\"\n A 'as_tuple=True' version of torch.nonzero to support torchscript.\n because of 
https://github.com/pytorch/pytorch/issues/38718\n \"\"\"\n if torch.jit.is_scripting():\n if x.dim() == 0:\n return x.unsqueeze(0).nonzero().unbind(1)\n return x.nonzero().unbind(1)\n else:\n return x.nonzero(as_tuple=True)" }, { "identifier": "Boxes", "path": "nativedancer/third_part/detectron2/structures/boxes.py", "snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n\n Attributes:\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).\n \"\"\"\n if not isinstance(tensor, torch.Tensor):\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=torch.device(\"cpu\"))\n else:\n tensor = tensor.to(torch.float32)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32)\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"Boxes\":\n \"\"\"\n Clone the Boxes.\n\n Returns:\n Boxes\n \"\"\"\n return Boxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return Boxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area\n\n def clip(self, box_size: Tuple[int, int]) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n Args:\n box_size (height, width): The clipping box's size.\n \"\"\"\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor:\n a binary vector which represents whether each box is empty\n (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2] - box[:, 0]\n heights = box[:, 3] - box[:, 1]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a BoolTensor\n\n Returns:\n Boxes: Create a new :class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"Boxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box.\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n inds_inside = (\n (self.tensor[..., 0] >= -boundary_threshold)\n & (self.tensor[..., 1] >= -boundary_threshold)\n & (self.tensor[..., 2] < width + boundary_threshold)\n & (self.tensor[..., 3] < height + boundary_threshold)\n )\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the box with horizontal and vertical scaling factors\n \"\"\"\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y\n\n @classmethod\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\n \"\"\"\n Concatenates a list of Boxes into a single Boxes\n\n Arguments:\n boxes_list (list[Boxes])\n\n Returns:\n Boxes: the concatenated Boxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, Boxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\n # https://github.com/pytorch/pytorch/issues/18627\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (4,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "pairwise_iou", "path": "nativedancer/third_part/detectron2/structures/boxes.py", "snippet": "def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M, compute the IoU\n (intersection over union) between **all** N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax).\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. 
Contains N & M boxes, respectively.\n\n Returns:\n Tensor: IoU, sized [N,M].\n \"\"\"\n area1 = boxes1.area() # [N]\n area2 = boxes2.area() # [M]\n inter = pairwise_intersection(boxes1, boxes2)\n\n # handle empty boxes\n iou = torch.where(\n inter > 0,\n inter / (area1[:, None] + area2 - inter),\n torch.zeros(1, dtype=inter.dtype, device=inter.device),\n )\n return iou" }, { "identifier": "ImageList", "path": "nativedancer/third_part/detectron2/structures/image_list.py", "snippet": "class ImageList:\n \"\"\"\n Structure that holds a list of images (of possibly\n varying sizes) as a single tensor.\n This works by padding the images to the same size.\n The original sizes of each image is stored in `image_sizes`.\n\n Attributes:\n image_sizes (list[tuple[int, int]]): each tuple is (h, w).\n During tracing, it becomes list[Tensor] instead.\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):\n \"\"\"\n Arguments:\n tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1\n image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can\n be smaller than (H, W) due to padding.\n \"\"\"\n self.tensor = tensor\n self.image_sizes = image_sizes\n\n def __len__(self) -> int:\n return len(self.image_sizes)\n\n def __getitem__(self, idx) -> torch.Tensor:\n \"\"\"\n Access the individual image in its original size.\n\n Args:\n idx: int or slice\n\n Returns:\n Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1\n \"\"\"\n size = self.image_sizes[idx]\n return self.tensor[idx, ..., : size[0], : size[1]]\n\n @torch.jit.unused\n def to(self, *args: Any, **kwargs: Any) -> \"ImageList\":\n cast_tensor = self.tensor.to(*args, **kwargs)\n return ImageList(cast_tensor, self.image_sizes)\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n @staticmethod\n def from_tensors(\n tensors: List[torch.Tensor],\n size_divisibility: int = 0,\n pad_value: float = 0.0,\n padding_constraints: Optional[Dict[str, int]] = None,\n ) -> \"ImageList\":\n \"\"\"\n Args:\n tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or\n (C_1, ..., C_K, Hi, Wi) where K >= 1. 
The Tensors will be padded\n to the same shape with `pad_value`.\n size_divisibility (int): If `size_divisibility > 0`, add padding to ensure\n the common height and width is divisible by `size_divisibility`.\n This depends on the model and many models need a divisibility of 32.\n pad_value (float): value to pad.\n padding_constraints (optional[Dict]): If given, it would follow the format as\n {\"size_divisibility\": int, \"square_size\": int}, where `size_divisibility` will\n overwrite the above one if presented and `square_size` indicates the\n square padding size if `square_size` > 0.\n Returns:\n an `ImageList`.\n \"\"\"\n assert len(tensors) > 0\n assert isinstance(tensors, (tuple, list))\n for t in tensors:\n assert isinstance(t, torch.Tensor), type(t)\n assert t.shape[:-2] == tensors[0].shape[:-2], t.shape\n\n image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]\n image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes]\n max_size = torch.stack(image_sizes_tensor).max(0).values\n\n if padding_constraints is not None:\n square_size = padding_constraints.get(\"square_size\", 0)\n if square_size > 0:\n # pad to square.\n max_size[0] = max_size[1] = square_size\n if \"size_divisibility\" in padding_constraints:\n size_divisibility = padding_constraints[\"size_divisibility\"]\n if size_divisibility > 1:\n stride = size_divisibility\n # the last two dims are H,W, both subject to divisibility requirement\n max_size = (max_size + (stride - 1)).div(stride, rounding_mode=\"floor\") * stride\n\n # handle weirdness of scripting and tracing ...\n if torch.jit.is_scripting():\n max_size: List[int] = max_size.to(dtype=torch.long).tolist()\n else:\n if torch.jit.is_tracing():\n image_sizes = image_sizes_tensor\n\n if len(tensors) == 1:\n # This seems slightly (2%) faster.\n # TODO: check whether it's faster for multiple images as well\n image_size = image_sizes[0]\n padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]\n batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)\n else:\n # max_size can be a tensor in tracing mode, therefore convert to list\n batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)\n device = (\n None if torch.jit.is_scripting() else (\"cpu\" if torch.jit.is_tracing() else None)\n )\n batched_imgs = tensors[0].new_full(batch_shape, pad_value, device=device)\n batched_imgs = move_device_like(batched_imgs, tensors[0])\n for i, img in enumerate(tensors):\n # Use `batched_imgs` directly instead of `img, pad_img = zip(tensors, batched_imgs)`\n # Tracing mode cannot capture `copy_()` of temporary locals\n batched_imgs[i, ..., : img.shape[-2], : img.shape[-1]].copy_(img)\n\n return ImageList(batched_imgs.contiguous(), image_sizes)" }, { "identifier": "Instances", "path": "nativedancer/third_part/detectron2/structures/instances.py", "snippet": "class Instances:\n \"\"\"\n This class represents a list of instances in an image.\n It stores the attributes of instances (e.g., boxes, masks, labels, scores) as \"fields\".\n All fields must have the same ``__len__`` which is the number of instances.\n\n All other (non-field) attributes of this class are considered private:\n they must start with '_' and are not modifiable by a user.\n\n Some basic usage:\n\n 1. Set/get/check a field:\n\n .. code-block:: python\n\n instances.gt_boxes = Boxes(...)\n print(instances.pred_masks) # a tensor of shape (N, H, W)\n print('gt_masks' in instances)\n\n 2. ``len(instances)`` returns the number of instances\n 3. 
Indexing: ``instances[indices]`` will apply the indexing on all the fields\n and returns a new :class:`Instances`.\n Typically, ``indices`` is a integer vector of indices,\n or a binary mask of length ``num_instances``\n\n .. code-block:: python\n\n category_3_detections = instances[instances.pred_classes == 3]\n confident_detections = instances[instances.scores > 0.9]\n \"\"\"\n\n def __init__(self, image_size: Tuple[int, int], **kwargs: Any):\n \"\"\"\n Args:\n image_size (height, width): the spatial size of the image.\n kwargs: fields to add to this `Instances`.\n \"\"\"\n self._image_size = image_size\n self._fields: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.set(k, v)\n\n @property\n def image_size(self) -> Tuple[int, int]:\n \"\"\"\n Returns:\n tuple: height, width\n \"\"\"\n return self._image_size\n\n def __setattr__(self, name: str, val: Any) -> None:\n if name.startswith(\"_\"):\n super().__setattr__(name, val)\n else:\n self.set(name, val)\n\n def __getattr__(self, name: str) -> Any:\n if name == \"_fields\" or name not in self._fields:\n raise AttributeError(\"Cannot find field '{}' in the given Instances!\".format(name))\n return self._fields[name]\n\n def set(self, name: str, value: Any) -> None:\n \"\"\"\n Set the field named `name` to `value`.\n The length of `value` must be the number of instances,\n and must agree with other existing fields in this object.\n \"\"\"\n with warnings.catch_warnings(record=True):\n data_len = len(value)\n if len(self._fields):\n assert (\n len(self) == data_len\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\n self._fields[name] = value\n\n def has(self, name: str) -> bool:\n \"\"\"\n Returns:\n bool: whether the field called `name` exists.\n \"\"\"\n return name in self._fields\n\n def remove(self, name: str) -> None:\n \"\"\"\n Remove the field called `name`.\n \"\"\"\n del self._fields[name]\n\n def get(self, name: str) -> Any:\n \"\"\"\n Returns the field called `name`.\n \"\"\"\n return self._fields[name]\n\n def get_fields(self) -> Dict[str, Any]:\n \"\"\"\n Returns:\n dict: a dict which maps names (str) to data of the fields\n\n Modifying the returned dict will modify this instance.\n \"\"\"\n return self._fields\n\n # Tensor-like methods\n def to(self, *args: Any, **kwargs: Any) -> \"Instances\":\n \"\"\"\n Returns:\n Instances: all fields are called with a `to(device)`, if the field has this method.\n \"\"\"\n ret = Instances(self._image_size)\n for k, v in self._fields.items():\n if hasattr(v, \"to\"):\n v = v.to(*args, **kwargs)\n ret.set(k, v)\n return ret\n\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Instances\":\n \"\"\"\n Args:\n item: an index-like object and will be used to index all the fields.\n\n Returns:\n If `item` is a string, return the data in the corresponding field.\n Otherwise, returns an `Instances` where all fields are indexed by `item`.\n \"\"\"\n if type(item) == int:\n if item >= len(self) or item < -len(self):\n raise IndexError(\"Instances index out of range!\")\n else:\n item = slice(item, None, len(self))\n\n ret = Instances(self._image_size)\n for k, v in self._fields.items():\n ret.set(k, v[item])\n return ret\n\n def __len__(self) -> int:\n for v in self._fields.values():\n # use __len__ because len() has to be int and is not friendly to tracing\n return v.__len__()\n raise NotImplementedError(\"Empty Instances does not support __len__!\")\n\n def __iter__(self):\n raise NotImplementedError(\"`Instances` object is not 
iterable!\")\n\n @staticmethod\n def cat(instance_lists: List[\"Instances\"]) -> \"Instances\":\n \"\"\"\n Args:\n instance_lists (list[Instances])\n\n Returns:\n Instances\n \"\"\"\n assert all(isinstance(i, Instances) for i in instance_lists)\n assert len(instance_lists) > 0\n if len(instance_lists) == 1:\n return instance_lists[0]\n\n image_size = instance_lists[0].image_size\n if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing\n for i in instance_lists[1:]:\n assert i.image_size == image_size\n ret = Instances(image_size)\n for k in instance_lists[0]._fields.keys():\n values = [i.get(k) for i in instance_lists]\n v0 = values[0]\n if isinstance(v0, torch.Tensor):\n values = torch.cat(values, dim=0)\n elif isinstance(v0, list):\n values = list(itertools.chain(*values))\n elif hasattr(type(v0), \"cat\"):\n values = type(v0).cat(values)\n else:\n raise ValueError(\"Unsupported type {} for concatenation\".format(type(v0)))\n ret.set(k, values)\n return ret\n\n def __str__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={}, \".format(len(self))\n s += \"image_height={}, \".format(self._image_size[0])\n s += \"image_width={}, \".format(self._image_size[1])\n s += \"fields=[{}])\".format(\", \".join((f\"{k}: {v}\" for k, v in self._fields.items())))\n return s\n\n __repr__ = __str__" }, { "identifier": "get_event_storage", "path": "nativedancer/third_part/detectron2/utils/events.py", "snippet": "def get_event_storage():\n \"\"\"\n Returns:\n The :class:`EventStorage` object that's currently being used.\n Throws an error if no :class:`EventStorage` is currently enabled.\n \"\"\"\n assert len(\n _CURRENT_STORAGE_STACK\n ), \"get_event_storage() has to be called inside a 'with EventStorage(...)' context!\"\n return _CURRENT_STORAGE_STACK[-1]" }, { "identifier": "Registry", "path": "nativedancer/third_part/detectron2/utils/registry.py", "snippet": "def _convert_target_to_string(t: Any) -> str:\ndef locate(name: str) -> Any:" }, { "identifier": "BottleneckBlock", "path": "nativedancer/third_part/detectron2/modeling/backbone/resnet.py", "snippet": "class BottleneckBlock(CNNBlockBase):\n \"\"\"\n The standard bottleneck residual block used by ResNet-50, 101 and 152\n defined in :paper:`ResNet`. 
It contains 3 conv layers with kernels\n 1x1, 3x3, 1x1, and a projection shortcut if needed.\n \"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n *,\n bottleneck_channels,\n stride=1,\n num_groups=1,\n norm=\"BN\",\n stride_in_1x1=False,\n dilation=1,\n ):\n \"\"\"\n Args:\n bottleneck_channels (int): number of output channels for the 3x3\n \"bottleneck\" conv layers.\n num_groups (int): number of groups for the 3x3 conv layer.\n norm (str or callable): normalization for all conv layers.\n See :func:`layers.get_norm` for supported format.\n stride_in_1x1 (bool): when stride>1, whether to put stride in the\n first 1x1 convolution or the bottleneck 3x3 convolution.\n dilation (int): the dilation rate of the 3x3 conv layer.\n \"\"\"\n super().__init__(in_channels, out_channels, stride)\n\n if in_channels != out_channels:\n self.shortcut = Conv2d(\n in_channels,\n out_channels,\n kernel_size=1,\n stride=stride,\n bias=False,\n norm=get_norm(norm, out_channels),\n )\n else:\n self.shortcut = None\n\n # The original MSRA ResNet models have stride in the first 1x1 conv\n # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have\n # stride in the 3x3 conv\n stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)\n\n self.conv1 = Conv2d(\n in_channels,\n bottleneck_channels,\n kernel_size=1,\n stride=stride_1x1,\n bias=False,\n norm=get_norm(norm, bottleneck_channels),\n )\n\n self.conv2 = Conv2d(\n bottleneck_channels,\n bottleneck_channels,\n kernel_size=3,\n stride=stride_3x3,\n padding=1 * dilation,\n bias=False,\n groups=num_groups,\n dilation=dilation,\n norm=get_norm(norm, bottleneck_channels),\n )\n\n self.conv3 = Conv2d(\n bottleneck_channels,\n out_channels,\n kernel_size=1,\n bias=False,\n norm=get_norm(norm, out_channels),\n )\n\n for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:\n if layer is not None: # shortcut can be None\n weight_init.c2_msra_fill(layer)\n\n # Zero-initialize the last normalization in each residual branch,\n # so that at the beginning, the residual branch starts with zeros,\n # and each residual block behaves like an identity.\n # See Sec 5.1 in \"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour\":\n # \"For BN layers, the learnable scaling coefficient γ is initialized\n # to be 1, except for each residual block's last BN\n # where γ is initialized to be 0.\"\n\n # nn.init.constant_(self.conv3.norm.weight, 0)\n # TODO this somehow hurts performance when training GN models from scratch.\n # Add it as an option when we need to use this code to train a backbone.\n\n def forward(self, x):\n out = self.conv1(x)\n out = F.relu_(out)\n\n out = self.conv2(out)\n out = F.relu_(out)\n\n out = self.conv3(out)\n\n if self.shortcut is not None:\n shortcut = self.shortcut(x)\n else:\n shortcut = x\n\n out += shortcut\n out = F.relu_(out)\n return out" }, { "identifier": "ResNet", "path": "nativedancer/third_part/detectron2/modeling/backbone/resnet.py", "snippet": "class ResNet(Backbone):\n \"\"\"\n Implement :paper:`ResNet`.\n \"\"\"\n\n def __init__(self, stem, stages, num_classes=None, out_features=None, freeze_at=0):\n \"\"\"\n Args:\n stem (nn.Module): a stem module\n stages (list[list[CNNBlockBase]]): several (typically 4) stages,\n each contains multiple :class:`CNNBlockBase`.\n num_classes (None or int): if None, will not perform classification.\n Otherwise, will create a linear layer.\n out_features (list[str]): name of the layers whose outputs should\n be returned in forward. 
Can be anything in \"stem\", \"linear\", or \"res2\" ...\n If None, will return the output of the last layer.\n freeze_at (int): The number of stages at the beginning to freeze.\n see :meth:`freeze` for detailed explanation.\n \"\"\"\n super().__init__()\n self.stem = stem\n self.num_classes = num_classes\n\n current_stride = self.stem.stride\n self._out_feature_strides = {\"stem\": current_stride}\n self._out_feature_channels = {\"stem\": self.stem.out_channels}\n\n self.stage_names, self.stages = [], []\n\n if out_features is not None:\n # Avoid keeping unused layers in this module. They consume extra memory\n # and may cause allreduce to fail\n num_stages = max(\n [{\"res2\": 1, \"res3\": 2, \"res4\": 3, \"res5\": 4}.get(f, 0) for f in out_features]\n )\n stages = stages[:num_stages]\n for i, blocks in enumerate(stages):\n assert len(blocks) > 0, len(blocks)\n for block in blocks:\n assert isinstance(block, CNNBlockBase), block\n\n name = \"res\" + str(i + 2)\n stage = nn.Sequential(*blocks)\n\n self.add_module(name, stage)\n self.stage_names.append(name)\n self.stages.append(stage)\n\n self._out_feature_strides[name] = current_stride = int(\n current_stride * np.prod([k.stride for k in blocks])\n )\n self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels\n self.stage_names = tuple(self.stage_names) # Make it static for scripting\n\n if num_classes is not None:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.linear = nn.Linear(curr_channels, num_classes)\n\n # Sec 5.1 in \"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour\":\n # \"The 1000-way fully-connected layer is initialized by\n # drawing weights from a zero-mean Gaussian with standard deviation of 0.01.\"\n nn.init.normal_(self.linear.weight, std=0.01)\n name = \"linear\"\n\n if out_features is None:\n out_features = [name]\n self._out_features = out_features\n assert len(self._out_features)\n children = [x[0] for x in self.named_children()]\n for out_feature in self._out_features:\n assert out_feature in children, \"Available children: {}\".format(\", \".join(children))\n self.freeze(freeze_at)\n\n def forward(self, x):\n \"\"\"\n Args:\n x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.\n\n Returns:\n dict[str->Tensor]: names and the corresponding features\n \"\"\"\n assert x.dim() == 4, f\"ResNet takes an input of shape (N, C, H, W). Got {x.shape} instead!\"\n outputs = {}\n x = self.stem(x)\n if \"stem\" in self._out_features:\n outputs[\"stem\"] = x\n for name, stage in zip(self.stage_names, self.stages):\n x = stage(x)\n if name in self._out_features:\n outputs[name] = x\n if self.num_classes is not None:\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.linear(x)\n if \"linear\" in self._out_features:\n outputs[\"linear\"] = x\n return outputs\n\n def output_shape(self):\n return {\n name: ShapeSpec(\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\n )\n for name in self._out_features\n }\n\n def freeze(self, freeze_at=0):\n \"\"\"\n Freeze the first several stages of the ResNet. Commonly used in\n fine-tuning.\n\n Layers that produce the same feature map spatial size are defined as one\n \"stage\" by :paper:`FPN`.\n\n Args:\n freeze_at (int): number of stages to freeze.\n `1` means freezing the stem. 
`2` means freezing the stem and\n one residual stage, etc.\n\n Returns:\n nn.Module: this ResNet itself\n \"\"\"\n if freeze_at >= 1:\n self.stem.freeze()\n for idx, stage in enumerate(self.stages, start=2):\n if freeze_at >= idx:\n for block in stage.children():\n block.freeze()\n return self\n\n @staticmethod\n def make_stage(block_class, num_blocks, *, in_channels, out_channels, **kwargs):\n \"\"\"\n Create a list of blocks of the same type that forms one ResNet stage.\n\n Args:\n block_class (type): a subclass of CNNBlockBase that's used to create all blocks in this\n stage. A module of this type must not change spatial resolution of inputs unless its\n stride != 1.\n num_blocks (int): number of blocks in this stage\n in_channels (int): input channels of the entire stage.\n out_channels (int): output channels of **every block** in the stage.\n kwargs: other arguments passed to the constructor of\n `block_class`. If the argument name is \"xx_per_block\", the\n argument is a list of values to be passed to each block in the\n stage. Otherwise, the same argument is passed to every block\n in the stage.\n\n Returns:\n list[CNNBlockBase]: a list of block module.\n\n Examples:\n ::\n stage = ResNet.make_stage(\n BottleneckBlock, 3, in_channels=16, out_channels=64,\n bottleneck_channels=16, num_groups=1,\n stride_per_block=[2, 1, 1],\n dilations_per_block=[1, 1, 2]\n )\n\n Usually, layers that produce the same feature map spatial size are defined as one\n \"stage\" (in :paper:`FPN`). Under such definition, ``stride_per_block[1:]`` should\n all be 1.\n \"\"\"\n blocks = []\n for i in range(num_blocks):\n curr_kwargs = {}\n for k, v in kwargs.items():\n if k.endswith(\"_per_block\"):\n assert len(v) == num_blocks, (\n f\"Argument '{k}' of make_stage should have the \"\n f\"same length as num_blocks={num_blocks}.\"\n )\n newk = k[: -len(\"_per_block\")]\n assert newk not in kwargs, f\"Cannot call make_stage with both {k} and {newk}!\"\n curr_kwargs[newk] = v[i]\n else:\n curr_kwargs[k] = v\n\n blocks.append(\n block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs)\n )\n in_channels = out_channels\n return blocks\n\n @staticmethod\n def make_default_stages(depth, block_class=None, **kwargs):\n \"\"\"\n Created list of ResNet stages from pre-defined depth (one of 18, 34, 50, 101, 152).\n If it doesn't create the ResNet variant you need, please use :meth:`make_stage`\n instead for fine-grained customization.\n\n Args:\n depth (int): depth of ResNet\n block_class (type): the CNN block class. Has to accept\n `bottleneck_channels` argument for depth > 50.\n By default it is BasicBlock or BottleneckBlock, based on the\n depth.\n kwargs:\n other arguments to pass to `make_stage`. 
Should not contain\n stride and channels, as they are predefined for each depth.\n\n Returns:\n list[list[CNNBlockBase]]: modules in all stages; see arguments of\n :class:`ResNet.__init__`.\n \"\"\"\n num_blocks_per_stage = {\n 18: [2, 2, 2, 2],\n 34: [3, 4, 6, 3],\n 50: [3, 4, 6, 3],\n 101: [3, 4, 23, 3],\n 152: [3, 8, 36, 3],\n }[depth]\n if block_class is None:\n block_class = BasicBlock if depth < 50 else BottleneckBlock\n if depth < 50:\n in_channels = [64, 64, 128, 256]\n out_channels = [64, 128, 256, 512]\n else:\n in_channels = [64, 256, 512, 1024]\n out_channels = [256, 512, 1024, 2048]\n ret = []\n for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels):\n if depth >= 50:\n kwargs[\"bottleneck_channels\"] = o // 4\n ret.append(\n ResNet.make_stage(\n block_class=block_class,\n num_blocks=n,\n stride_per_block=[s] + [1] * (n - 1),\n in_channels=i,\n out_channels=o,\n **kwargs,\n )\n )\n return ret" }, { "identifier": "Matcher", "path": "nativedancer/third_part/detectron2/modeling/matcher.py", "snippet": "class Matcher:\n \"\"\"\n This class assigns to each predicted \"element\" (e.g., a box) a ground-truth\n element. Each predicted element will have exactly zero or one matches; each\n ground-truth element may be matched to zero or more predicted elements.\n\n The matching is determined by the MxN match_quality_matrix, that characterizes\n how well each (ground-truth, prediction)-pair match each other. For example,\n if the elements are boxes, this matrix may contain box intersection-over-union\n overlap values.\n\n The matcher returns (a) a vector of length N containing the index of the\n ground-truth element m in [0, M) that matches to prediction n in [0, N).\n (b) a vector of length N containing the labels for each prediction.\n \"\"\"\n\n def __init__(\n self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False\n ):\n \"\"\"\n Args:\n thresholds (list): a list of thresholds used to stratify predictions\n into levels.\n labels (list): a list of values to label predictions belonging at\n each level. A label can be one of {-1, 0, 1} signifying\n {ignore, negative class, positive class}, respectively.\n allow_low_quality_matches (bool): if True, produce additional matches\n for predictions with maximum match quality lower than high_threshold.\n See set_low_quality_matches_ for more details.\n\n For example,\n thresholds = [0.3, 0.5]\n labels = [0, -1, 1]\n All predictions with iou < 0.3 will be marked with 0 and\n thus will be considered as false positives while training.\n All predictions with 0.3 <= iou < 0.5 will be marked with -1 and\n thus will be ignored.\n All predictions with 0.5 <= iou will be marked with 1 and\n thus will be considered as true positives.\n \"\"\"\n # Add -inf and +inf to first and last position in thresholds\n thresholds = thresholds[:]\n assert thresholds[0] > 0\n thresholds.insert(0, -float(\"inf\"))\n thresholds.append(float(\"inf\"))\n # Currently torchscript does not support all + generator\n assert all([low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])])\n assert all([l in [-1, 0, 1] for l in labels])\n assert len(labels) == len(thresholds) - 1\n self.thresholds = thresholds\n self.labels = labels\n self.allow_low_quality_matches = allow_low_quality_matches\n\n def __call__(self, match_quality_matrix):\n \"\"\"\n Args:\n match_quality_matrix (Tensor[float]): an MxN tensor, containing the\n pairwise quality between M ground-truth elements and N predicted\n elements. 
All elements must be >= 0 (due to the us of `torch.nonzero`\n for selecting indices in :meth:`set_low_quality_matches_`).\n\n Returns:\n matches (Tensor[int64]): a vector of length N, where matches[i] is a matched\n ground-truth index in [0, M)\n match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates\n whether a prediction is a true or false positive or ignored\n \"\"\"\n assert match_quality_matrix.dim() == 2\n if match_quality_matrix.numel() == 0:\n default_matches = match_quality_matrix.new_full(\n (match_quality_matrix.size(1),), 0, dtype=torch.int64\n )\n # When no gt boxes exist, we define IOU = 0 and therefore set labels\n # to `self.labels[0]`, which usually defaults to background class 0\n # To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds\n default_match_labels = match_quality_matrix.new_full(\n (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8\n )\n return default_matches, default_match_labels\n\n assert torch.all(match_quality_matrix >= 0)\n\n # match_quality_matrix is M (gt) x N (predicted)\n # Max over gt elements (dim 0) to find best gt candidate for each prediction\n matched_vals, matches = match_quality_matrix.max(dim=0)\n\n match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)\n\n for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):\n low_high = (matched_vals >= low) & (matched_vals < high)\n match_labels[low_high] = l\n\n if self.allow_low_quality_matches:\n self.set_low_quality_matches_(match_labels, match_quality_matrix)\n\n return matches, match_labels\n\n def set_low_quality_matches_(self, match_labels, match_quality_matrix):\n \"\"\"\n Produce additional matches for predictions that have only low-quality matches.\n Specifically, for each ground-truth G find the set of predictions that have\n maximum overlap with it (including ties); for each prediction in that set, if\n it is unmatched, then match it to the ground-truth G.\n\n This function implements the RPN assignment case (i) in Sec. 3.1.2 of\n :paper:`Faster R-CNN`.\n \"\"\"\n # For each gt, find the prediction with which it has highest quality\n highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)\n # Find the highest quality match available, even if it is low, including ties.\n # Note that the matches qualities must be positive due to the use of\n # `torch.nonzero`.\n _, pred_inds_with_highest_quality = nonzero_tuple(\n match_quality_matrix == highest_quality_foreach_gt[:, None]\n )\n # If an anchor was labeled positive only due to a low-quality match\n # with gt_A, but it has larger overlap with gt_B, it's matched index will still be gt_B.\n # This follows the implementation in Detectron, and is found to have no significant impact.\n match_labels[pred_inds_with_highest_quality] = 1" }, { "identifier": "ROIPooler", "path": "nativedancer/third_part/detectron2/modeling/poolers.py", "snippet": "class ROIPooler(nn.Module):\n \"\"\"\n Region of interest feature map pooler that supports pooling from one or more\n feature maps.\n \"\"\"\n\n def __init__(\n self,\n output_size,\n scales,\n sampling_ratio,\n pooler_type,\n canonical_box_size=224,\n canonical_level=4,\n ):\n \"\"\"\n Args:\n output_size (int, tuple[int] or list[int]): output size of the pooled region,\n e.g., 14 x 14. If tuple or list is given, the length must be 2.\n scales (list[float]): The scale for each low-level pooling op relative to\n the input image. 
For a feature map with stride s relative to the input\n image, scale is defined as 1/s. The stride must be power of 2.\n When there are multiple scales, they must form a pyramid, i.e. they must be\n a monotically decreasing geometric sequence with a factor of 1/2.\n sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op.\n pooler_type (string): Name of the type of pooling operation that should be applied.\n For instance, \"ROIPool\" or \"ROIAlignV2\".\n canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). The default\n is heuristically defined as 224 pixels in the FPN paper (based on ImageNet\n pre-training).\n canonical_level (int): The feature map level index from which a canonically-sized box\n should be placed. The default is defined as level 4 (stride=16) in the FPN paper,\n i.e., a box of size 224x224 will be placed on the feature with stride=16.\n The box placement for all boxes will be determined from their sizes w.r.t\n canonical_box_size. For example, a box whose area is 4x that of a canonical box\n should be used to pool features from feature level ``canonical_level+1``.\n\n Note that the actual input feature maps given to this module may not have\n sufficiently many levels for the input boxes. If the boxes are too large or too\n small for the input feature maps, the closest level will be used.\n \"\"\"\n super().__init__()\n\n if isinstance(output_size, int):\n output_size = (output_size, output_size)\n assert len(output_size) == 2\n assert isinstance(output_size[0], int) and isinstance(output_size[1], int)\n self.output_size = output_size\n\n if pooler_type == \"ROIAlign\":\n self.level_poolers = nn.ModuleList(\n ROIAlign(\n output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False\n )\n for scale in scales\n )\n elif pooler_type == \"ROIAlignV2\":\n self.level_poolers = nn.ModuleList(\n ROIAlign(\n output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True\n )\n for scale in scales\n )\n elif pooler_type == \"ROIPool\":\n self.level_poolers = nn.ModuleList(\n RoIPool(output_size, spatial_scale=scale) for scale in scales\n )\n elif pooler_type == \"ROIAlignRotated\":\n self.level_poolers = nn.ModuleList(\n ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio)\n for scale in scales\n )\n else:\n raise ValueError(\"Unknown pooler type: {}\".format(pooler_type))\n\n # Map scale (defined as 1 / stride) to its feature map level under the\n # assumption that stride is a power of 2.\n min_level = -(math.log2(scales[0]))\n max_level = -(math.log2(scales[-1]))\n assert math.isclose(min_level, int(min_level)) and math.isclose(\n max_level, int(max_level)\n ), \"Featuremap stride is not power of 2!\"\n self.min_level = int(min_level)\n self.max_level = int(max_level)\n assert (\n len(scales) == self.max_level - self.min_level + 1\n ), \"[ROIPooler] Sizes of input featuremaps do not form a pyramid!\"\n assert 0 <= self.min_level and self.min_level <= self.max_level\n self.canonical_level = canonical_level\n assert canonical_box_size > 0\n self.canonical_box_size = canonical_box_size\n\n def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]):\n \"\"\"\n Args:\n x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those\n used to construct this module.\n box_lists (list[Boxes] | list[RotatedBoxes]):\n A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch.\n The box coordinates are defined on the original image and\n will be 
scaled by the `scales` argument of :class:`ROIPooler`.\n\n Returns:\n Tensor:\n A tensor of shape (M, C, output_size, output_size) where M is the total number of\n boxes aggregated over all N batch images and C is the number of channels in `x`.\n \"\"\"\n num_level_assignments = len(self.level_poolers)\n\n if not is_fx_tracing():\n torch._assert(\n isinstance(x, list) and isinstance(box_lists, list),\n \"Arguments to pooler must be lists\",\n )\n assert_fx_safe(\n len(x) == num_level_assignments,\n \"unequal value, num_level_assignments={}, but x is list of {} Tensors\".format(\n num_level_assignments, len(x)\n ),\n )\n assert_fx_safe(\n len(box_lists) == x[0].size(0),\n \"unequal value, x[0] batch dim 0 is {}, but box_list has length {}\".format(\n x[0].size(0), len(box_lists)\n ),\n )\n if len(box_lists) == 0:\n return _create_zeros(None, x[0].shape[1], *self.output_size, x[0])\n\n pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)\n\n if num_level_assignments == 1:\n return self.level_poolers[0](x[0], pooler_fmt_boxes)\n\n level_assignments = assign_boxes_to_levels(\n box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level\n )\n\n num_channels = x[0].shape[1]\n output_size = self.output_size[0]\n\n output = _create_zeros(pooler_fmt_boxes, num_channels, output_size, output_size, x[0])\n\n for level, pooler in enumerate(self.level_poolers):\n inds = nonzero_tuple(level_assignments == level)[0]\n pooler_fmt_boxes_level = pooler_fmt_boxes[inds]\n # Use index_put_ instead of advance indexing, to avoid pytorch/issues/49852\n output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level))\n\n return output" }, { "identifier": "add_ground_truth_to_proposals", "path": "nativedancer/third_part/detectron2/modeling/proposal_generator/proposal_utils.py", "snippet": "def add_ground_truth_to_proposals(\n gt: Union[List[Instances], List[Boxes]], proposals: List[Instances]\n) -> List[Instances]:\n \"\"\"\n Call `add_ground_truth_to_proposals_single_image` for all images.\n\n Args:\n gt(Union[List[Instances], List[Boxes]): list of N elements. Element i is a Instances\n representing the ground-truth for image i.\n proposals (list[Instances]): list of N elements. Element i is a Instances\n representing the proposals for image i.\n\n Returns:\n list[Instances]: list of N Instances. 
Each is the proposals for the image,\n with field \"proposal_boxes\" and \"objectness_logits\".\n \"\"\"\n assert gt is not None\n\n if len(proposals) != len(gt):\n raise ValueError(\"proposals and gt should have the same length as the number of images!\")\n if len(proposals) == 0:\n return proposals\n\n return [\n add_ground_truth_to_proposals_single_image(gt_i, proposals_i)\n for gt_i, proposals_i in zip(gt, proposals)\n ]" }, { "identifier": "subsample_labels", "path": "nativedancer/third_part/detectron2/modeling/sampling.py", "snippet": "def subsample_labels(\n labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int\n):\n \"\"\"\n Return `num_samples` (or fewer, if not enough found)\n random samples from `labels` which is a mixture of positives & negatives.\n It will try to return as many positives as possible without\n exceeding `positive_fraction * num_samples`, and then try to\n fill the remaining slots with negatives.\n\n Args:\n labels (Tensor): (N, ) label vector with values:\n * -1: ignore\n * bg_label: background (\"negative\") class\n * otherwise: one or more foreground (\"positive\") classes\n num_samples (int): The total number of labels with value >= 0 to return.\n Values that are not sampled will be filled with -1 (ignore).\n positive_fraction (float): The number of subsampled labels with values > 0\n is `min(num_positives, int(positive_fraction * num_samples))`. The number\n of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`.\n In order words, if there are not enough positives, the sample is filled with\n negatives. If there are also not enough negatives, then as many elements are\n sampled as is possible.\n bg_label (int): label index of background (\"negative\") class.\n\n Returns:\n pos_idx, neg_idx (Tensor):\n 1D vector of indices. The total length of both is `num_samples` or fewer.\n \"\"\"\n positive = nonzero_tuple((labels != -1) & (labels != bg_label))[0]\n negative = nonzero_tuple(labels == bg_label)[0]\n\n num_pos = int(num_samples * positive_fraction)\n # protect against not enough positive examples\n num_pos = min(positive.numel(), num_pos)\n num_neg = num_samples - num_pos\n # protect against not enough negative examples\n num_neg = min(negative.numel(), num_neg)\n\n # randomly select positive and negative examples\n perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]\n perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]\n\n pos_idx = positive[perm1]\n neg_idx = negative[perm2]\n return pos_idx, neg_idx" }, { "identifier": "build_box_head", "path": "nativedancer/third_part/detectron2/modeling/roi_heads/box_head.py", "snippet": "def build_box_head(cfg, input_shape):\n \"\"\"\n Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`.\n \"\"\"\n name = cfg.MODEL.ROI_BOX_HEAD.NAME\n return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape)" }, { "identifier": "FastRCNNOutputLayers", "path": "nativedancer/third_part/detectron2/modeling/roi_heads/fast_rcnn.py", "snippet": "class FastRCNNOutputLayers(nn.Module):\n \"\"\"\n Two linear layers for predicting Fast R-CNN outputs:\n\n 1. proposal-to-detection box regression deltas\n 2. 
classification scores\n \"\"\"\n\n @configurable\n def __init__(\n self,\n input_shape: ShapeSpec,\n *,\n box2box_transform,\n num_classes: int,\n test_score_thresh: float = 0.0,\n test_nms_thresh: float = 0.5,\n test_topk_per_image: int = 100,\n cls_agnostic_bbox_reg: bool = False,\n smooth_l1_beta: float = 0.0,\n box_reg_loss_type: str = \"smooth_l1\",\n loss_weight: Union[float, Dict[str, float]] = 1.0,\n use_fed_loss: bool = False,\n use_sigmoid_ce: bool = False,\n get_fed_loss_cls_weights: Optional[Callable] = None,\n fed_loss_num_classes: int = 50,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n input_shape (ShapeSpec): shape of the input feature to this module\n box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):\n num_classes (int): number of foreground classes\n test_score_thresh (float): threshold to filter predictions results.\n test_nms_thresh (float): NMS threshold for prediction results.\n test_topk_per_image (int): number of top predictions to produce per image.\n cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression\n smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if\n `box_reg_loss_type` is \"smooth_l1\"\n box_reg_loss_type (str): Box regression loss type. One of: \"smooth_l1\", \"giou\",\n \"diou\", \"ciou\"\n loss_weight (float|dict): weights to use for losses. Can be single float for weighting\n all losses, or a dict of individual weightings. Valid dict keys are:\n * \"loss_cls\": applied to classification loss\n * \"loss_box_reg\": applied to box regression loss\n use_fed_loss (bool): whether to use federated loss which samples additional negative\n classes to calculate the loss\n use_sigmoid_ce (bool): whether to calculate the loss using weighted average of binary\n cross entropy with logits. This could be used together with federated loss\n get_fed_loss_cls_weights (Callable): a callable which takes dataset name and frequency\n weight power, and returns the probabilities to sample negative classes for\n federated loss. 
The implementation can be found in\n detectron2/data/detection_utils.py\n fed_loss_num_classes (int): number of federated classes to keep in total\n \"\"\"\n super().__init__()\n if isinstance(input_shape, int): # some backward compatibility\n input_shape = ShapeSpec(channels=input_shape)\n self.num_classes = num_classes\n input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)\n # prediction layer for num_classes foreground classes and one background class (hence + 1)\n self.cls_score = nn.Linear(input_size, num_classes + 1)\n num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes\n box_dim = len(box2box_transform.weights)\n self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)\n\n nn.init.normal_(self.cls_score.weight, std=0.01)\n nn.init.normal_(self.bbox_pred.weight, std=0.001)\n for l in [self.cls_score, self.bbox_pred]:\n nn.init.constant_(l.bias, 0)\n\n self.box2box_transform = box2box_transform\n self.smooth_l1_beta = smooth_l1_beta\n self.test_score_thresh = test_score_thresh\n self.test_nms_thresh = test_nms_thresh\n self.test_topk_per_image = test_topk_per_image\n self.box_reg_loss_type = box_reg_loss_type\n if isinstance(loss_weight, float):\n loss_weight = {\"loss_cls\": loss_weight, \"loss_box_reg\": loss_weight}\n self.loss_weight = loss_weight\n self.use_fed_loss = use_fed_loss\n self.use_sigmoid_ce = use_sigmoid_ce\n self.fed_loss_num_classes = fed_loss_num_classes\n\n if self.use_fed_loss:\n assert self.use_sigmoid_ce, \"Please use sigmoid cross entropy loss with federated loss\"\n fed_loss_cls_weights = get_fed_loss_cls_weights()\n assert (\n len(fed_loss_cls_weights) == self.num_classes\n ), \"Please check the provided fed_loss_cls_weights. Their size should match num_classes\"\n self.register_buffer(\"fed_loss_cls_weights\", fed_loss_cls_weights)\n\n @classmethod\n def from_config(cls, cfg, input_shape):\n return {\n \"input_shape\": input_shape,\n \"box2box_transform\": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),\n # fmt: off\n \"num_classes\" : cfg.MODEL.ROI_HEADS.NUM_CLASSES,\n \"cls_agnostic_bbox_reg\" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,\n \"smooth_l1_beta\" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,\n \"test_score_thresh\" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,\n \"test_nms_thresh\" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,\n \"test_topk_per_image\" : cfg.TEST.DETECTIONS_PER_IMAGE,\n \"box_reg_loss_type\" : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE,\n \"loss_weight\" : {\"loss_box_reg\": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT}, # noqa\n \"use_fed_loss\" : cfg.MODEL.ROI_BOX_HEAD.USE_FED_LOSS,\n \"use_sigmoid_ce\" : cfg.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE,\n \"get_fed_loss_cls_weights\" : lambda: get_fed_loss_cls_weights(dataset_names=cfg.DATASETS.TRAIN, freq_weight_power=cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER), # noqa\n \"fed_loss_num_classes\" : cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CLASSES,\n # fmt: on\n }\n\n def forward(self, x):\n \"\"\"\n Args:\n x: per-region features of shape (N, ...) for N bounding boxes to predict.\n\n Returns:\n (Tensor, Tensor):\n First tensor: shape (N,K+1), scores for each of the N box. Each row contains the\n scores for K object categories and 1 background class.\n\n Second tensor: bounding box regression deltas for each box. 
Shape is shape (N,Kx4),\n or (N,4) for class-agnostic regression.\n \"\"\"\n if x.dim() > 2:\n x = torch.flatten(x, start_dim=1)\n scores = self.cls_score(x)\n proposal_deltas = self.bbox_pred(x)\n return scores, proposal_deltas\n\n def losses(self, predictions, proposals):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were used\n to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,\n ``gt_classes`` are expected.\n\n Returns:\n Dict[str, Tensor]: dict of losses\n \"\"\"\n scores, proposal_deltas = predictions\n\n # parse classification outputs\n gt_classes = (\n cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)\n )\n _log_classification_stats(scores, gt_classes)\n\n # parse box regression outputs\n if len(proposals):\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4\n assert not proposal_boxes.requires_grad, \"Proposals should not require gradients!\"\n # If \"gt_boxes\" does not exist, the proposals must be all negative and\n # should not be included in regression loss computation.\n # Here we just use proposal_boxes as an arbitrary placeholder because its\n # value won't be used in self.box_reg_loss().\n gt_boxes = cat(\n [(p.gt_boxes if p.has(\"gt_boxes\") else p.proposal_boxes).tensor for p in proposals],\n dim=0,\n )\n else:\n proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)\n\n if self.use_sigmoid_ce:\n loss_cls = self.sigmoid_cross_entropy_loss(scores, gt_classes)\n else:\n loss_cls = cross_entropy(scores, gt_classes, reduction=\"mean\")\n\n losses = {\n \"loss_cls\": loss_cls,\n \"loss_box_reg\": self.box_reg_loss(\n proposal_boxes, gt_boxes, proposal_deltas, gt_classes\n ),\n }\n return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}\n\n # Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/fed_loss.py # noqa\n # with slight modifications\n def get_fed_loss_classes(self, gt_classes, num_fed_loss_classes, num_classes, weight):\n \"\"\"\n Args:\n gt_classes: a long tensor of shape R that contains the gt class label of each proposal.\n num_fed_loss_classes: minimum number of classes to keep when calculating federated loss.\n Will sample negative classes if number of unique gt_classes is smaller than this value.\n num_classes: number of foreground classes\n weight: probabilities used to sample negative classes\n\n Returns:\n Tensor:\n classes to keep when calculating the federated loss, including both unique gt\n classes and sampled negative classes.\n \"\"\"\n unique_gt_classes = torch.unique(gt_classes)\n prob = unique_gt_classes.new_ones(num_classes + 1).float()\n prob[-1] = 0\n if len(unique_gt_classes) < num_fed_loss_classes:\n prob[:num_classes] = weight.float().clone()\n prob[unique_gt_classes] = 0\n sampled_negative_classes = torch.multinomial(\n prob, num_fed_loss_classes - len(unique_gt_classes), replacement=False\n )\n fed_loss_classes = torch.cat([unique_gt_classes, sampled_negative_classes])\n else:\n fed_loss_classes = unique_gt_classes\n return fed_loss_classes\n\n # Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/custom_fast_rcnn.py#L113 # noqa\n # with slight modifications\n def sigmoid_cross_entropy_loss(self, pred_class_logits, gt_classes):\n \"\"\"\n Args:\n pred_class_logits: shape (N, K+1), scores for 
each of the N box. Each row contains the\n scores for K object categories and 1 background class\n gt_classes: a long tensor of shape R that contains the gt class label of each proposal.\n \"\"\"\n if pred_class_logits.numel() == 0:\n return pred_class_logits.new_zeros([1])[0]\n\n N = pred_class_logits.shape[0]\n K = pred_class_logits.shape[1] - 1\n\n target = pred_class_logits.new_zeros(N, K + 1)\n target[range(len(gt_classes)), gt_classes] = 1\n target = target[:, :K]\n\n cls_loss = F.binary_cross_entropy_with_logits(\n pred_class_logits[:, :-1], target, reduction=\"none\"\n )\n\n if self.use_fed_loss:\n fed_loss_classes = self.get_fed_loss_classes(\n gt_classes,\n num_fed_loss_classes=self.fed_loss_num_classes,\n num_classes=K,\n weight=self.fed_loss_cls_weights,\n )\n fed_loss_classes_mask = fed_loss_classes.new_zeros(K + 1)\n fed_loss_classes_mask[fed_loss_classes] = 1\n fed_loss_classes_mask = fed_loss_classes_mask[:K]\n weight = fed_loss_classes_mask.view(1, K).expand(N, K).float()\n else:\n weight = 1\n\n loss = torch.sum(cls_loss * weight) / N\n return loss\n\n def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes):\n \"\"\"\n Args:\n proposal_boxes/gt_boxes are tensors with the same shape (R, 4 or 5).\n pred_deltas has shape (R, 4 or 5), or (R, num_classes * (4 or 5)).\n gt_classes is a long tensor of shape R, the gt class label of each proposal.\n R shall be the number of proposals.\n \"\"\"\n box_dim = proposal_boxes.shape[1] # 4 or 5\n # Regression loss is only computed for foreground proposals (those matched to a GT)\n fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0]\n if pred_deltas.shape[1] == box_dim: # cls-agnostic regression\n fg_pred_deltas = pred_deltas[fg_inds]\n else:\n fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[\n fg_inds, gt_classes[fg_inds]\n ]\n\n loss_box_reg = _dense_box_regression_loss(\n [proposal_boxes[fg_inds]],\n self.box2box_transform,\n [fg_pred_deltas.unsqueeze(0)],\n [gt_boxes[fg_inds]],\n ...,\n self.box_reg_loss_type,\n self.smooth_l1_beta,\n )\n\n # The reg loss is normalized using the total number of regions (R), not the number\n # of foreground regions even though the box regression loss is only defined on\n # foreground regions. Why? Because doing so gives equal training influence to\n # each foreground example. To see how, consider two different minibatches:\n # (1) Contains a single foreground region\n # (2) Contains 100 foreground regions\n # If we normalize by the number of foreground regions, the single example in\n # minibatch (1) will be given 100 times as much influence as each foreground\n # example in minibatch (2). Normalizing by the total number of regions, R,\n # means that the single example in minibatch (1) and each of the 100 examples\n # in minibatch (2) are given equal influence.\n return loss_box_reg / max(gt_classes.numel(), 1.0) # return 0 if empty\n\n def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions. 
The ``proposal_boxes`` field is expected.\n\n Returns:\n list[Instances]: same as `fast_rcnn_inference`.\n list[Tensor]: same as `fast_rcnn_inference`.\n \"\"\"\n boxes = self.predict_boxes(predictions, proposals)\n scores = self.predict_probs(predictions, proposals)\n image_shapes = [x.image_size for x in proposals]\n return fast_rcnn_inference(\n boxes,\n scores,\n image_shapes,\n self.test_score_thresh,\n self.test_nms_thresh,\n self.test_topk_per_image,\n )\n\n def predict_boxes_for_gt_classes(self, predictions, proposals):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were used\n to compute predictions. The fields ``proposal_boxes``, ``gt_classes`` are expected.\n\n Returns:\n list[Tensor]:\n A list of Tensors of predicted boxes for GT classes in case of\n class-specific box head. Element i of the list has shape (Ri, B), where Ri is\n the number of proposals for image i and B is the box dimension (4 or 5)\n \"\"\"\n if not len(proposals):\n return []\n scores, proposal_deltas = predictions\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)\n N, B = proposal_boxes.shape\n predict_boxes = self.box2box_transform.apply_deltas(\n proposal_deltas, proposal_boxes\n ) # Nx(KxB)\n\n K = predict_boxes.shape[1] // B\n if K > 1:\n gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)\n # Some proposals are ignored or have a background class. Their gt_classes\n # cannot be used as index.\n gt_classes = gt_classes.clamp_(0, K - 1)\n\n predict_boxes = predict_boxes.view(N, K, B)[\n torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes\n ]\n num_prop_per_image = [len(p) for p in proposals]\n return predict_boxes.split(num_prop_per_image)\n\n def predict_boxes(\n self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]\n ):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions. The ``proposal_boxes`` field is expected.\n\n Returns:\n list[Tensor]:\n A list of Tensors of predicted class-specific or class-agnostic boxes\n for each image. 
Element i has shape (Ri, K * B) or (Ri, B), where Ri is\n the number of proposals for image i and B is the box dimension (4 or 5)\n \"\"\"\n if not len(proposals):\n return []\n _, proposal_deltas = predictions\n num_prop_per_image = [len(p) for p in proposals]\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)\n predict_boxes = self.box2box_transform.apply_deltas(\n proposal_deltas,\n proposal_boxes,\n ) # Nx(KxB)\n return predict_boxes.split(num_prop_per_image)\n\n def predict_probs(\n self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]\n ):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions.\n\n Returns:\n list[Tensor]:\n A list of Tensors of predicted class probabilities for each image.\n Element i has shape (Ri, K + 1), where Ri is the number of proposals for image i.\n \"\"\"\n scores, _ = predictions\n num_inst_per_image = [len(p) for p in proposals]\n if self.use_sigmoid_ce:\n probs = scores.sigmoid()\n else:\n probs = F.softmax(scores, dim=-1)\n return probs.split(num_inst_per_image, dim=0)" }, { "identifier": "build_keypoint_head", "path": "nativedancer/third_part/detectron2/modeling/roi_heads/keypoint_head.py", "snippet": "def build_keypoint_head(cfg, input_shape):\n \"\"\"\n Build a keypoint head from `cfg.MODEL.ROI_KEYPOINT_HEAD.NAME`.\n \"\"\"\n name = cfg.MODEL.ROI_KEYPOINT_HEAD.NAME\n return ROI_KEYPOINT_HEAD_REGISTRY.get(name)(cfg, input_shape)" }, { "identifier": "build_mask_head", "path": "nativedancer/third_part/detectron2/modeling/roi_heads/mask_head.py", "snippet": "def build_mask_head(cfg, input_shape):\n \"\"\"\n Build a mask head defined by `cfg.MODEL.ROI_MASK_HEAD.NAME`.\n \"\"\"\n name = cfg.MODEL.ROI_MASK_HEAD.NAME\n return ROI_MASK_HEAD_REGISTRY.get(name)(cfg, input_shape)" } ]
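The FastRCNNOutputLayers snippet above keeps the unique ground-truth classes of a batch and samples additional negative classes before masking the sigmoid cross-entropy (see get_fed_loss_classes). A minimal, self-contained sketch of that sampling step follows; the function and variable names are illustrative only and are not part of this dataset row or of detectron2's public API.

import torch

def sample_fed_loss_classes(gt_classes, num_keep, num_classes, class_weight):
    # Keep every class that appears in the batch, then draw extra negative classes
    # in proportion to class_weight until num_keep classes are selected in total.
    unique_gt = torch.unique(gt_classes)
    if len(unique_gt) >= num_keep:
        return unique_gt
    prob = gt_classes.new_ones(num_classes + 1).float()
    prob[num_classes] = 0                       # the background slot is never sampled
    prob[:num_classes] = class_weight.float()
    prob[unique_gt] = 0                         # classes already present are not re-drawn
    extra = torch.multinomial(prob, num_keep - len(unique_gt), replacement=False)
    return torch.cat([unique_gt, extra])

# Example: 80 foreground classes (background label = 80) and random frequency weights.
weights = torch.rand(80)
gt = torch.tensor([3, 3, 17, 42, 80])
kept = sample_fed_loss_classes(gt, num_keep=50, num_classes=80, class_weight=weights)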
import inspect import logging import numpy as np import torch from typing import Dict, List, Optional, Tuple from torch import nn from ...config import configurable from ...layers import ShapeSpec, nonzero_tuple from ...structures import Boxes, ImageList, Instances, pairwise_iou from ...utils.events import get_event_storage from ...utils.registry import Registry from ..backbone.resnet import BottleneckBlock, ResNet from ..matcher import Matcher from ..poolers import ROIPooler from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals from ..sampling import subsample_labels from .box_head import build_box_head from .fast_rcnn import FastRCNNOutputLayers from .keypoint_head import build_keypoint_head from .mask_head import build_mask_head
20,227
Args: proposals (list[Instances]): A list of N Instances, where N is the number of images in the batch. bg_label: label index of background class. Returns: list[Instances]: N Instances, each contains only the selected foreground instances. list[Tensor]: N boolean vector, correspond to the selection mask of each Instances object. True for selected instances. """ assert isinstance(proposals, (list, tuple)) assert isinstance(proposals[0], Instances) assert proposals[0].has("gt_classes") fg_proposals = [] fg_selection_masks = [] for proposals_per_image in proposals: gt_classes = proposals_per_image.gt_classes fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label) fg_idxs = fg_selection_mask.nonzero().squeeze(1) fg_proposals.append(proposals_per_image[fg_idxs]) fg_selection_masks.append(fg_selection_mask) return fg_proposals, fg_selection_masks def select_proposals_with_visible_keypoints(proposals: List[Instances]) -> List[Instances]: """ Args: proposals (list[Instances]): a list of N Instances, where N is the number of images. Returns: proposals: only contains proposals with at least one visible keypoint. Note that this is still slightly different from Detectron. In Detectron, proposals for training keypoint head are re-sampled from all the proposals with IOU>threshold & >=1 visible keypoint. Here, the proposals are first sampled from all proposals with IOU>threshold, then proposals with no visible keypoint are filtered out. This strategy seems to make no difference on Detectron and is easier to implement. """ ret = [] all_num_fg = [] for proposals_per_image in proposals: # If empty/unannotated image (hard negatives), skip filtering for train if len(proposals_per_image) == 0: ret.append(proposals_per_image) continue gt_keypoints = proposals_per_image.gt_keypoints.tensor # #fg x K x 3 vis_mask = gt_keypoints[:, :, 2] >= 1 xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1] proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1) # #fg x 1 x 4 kp_in_box = ( (xs >= proposal_boxes[:, :, 0]) & (xs <= proposal_boxes[:, :, 2]) & (ys >= proposal_boxes[:, :, 1]) & (ys <= proposal_boxes[:, :, 3]) ) selection = (kp_in_box & vis_mask).any(dim=1) selection_idxs = nonzero_tuple(selection)[0] all_num_fg.append(selection_idxs.numel()) ret.append(proposals_per_image[selection_idxs]) storage = get_event_storage() storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg)) return ret class ROIHeads(torch.nn.Module): """ ROIHeads perform all per-region computation in an R-CNN. It typically contains logic to 1. (in training only) match proposals with ground truth and sample them 2. crop the regions and extract per-region features using proposals 3. make per-region predictions with different heads It can have many variants, implemented as subclasses of this class. This base class contains the logic to match/sample proposals. But it is not necessary to inherit this class if the sampling logic is not needed. """ @configurable def __init__( self, *, num_classes, batch_size_per_image, positive_fraction, proposal_matcher, proposal_append_gt=True, ): """ NOTE: this interface is experimental. Args: num_classes (int): number of foreground classes (i.e. background is not included) batch_size_per_image (int): number of proposals to sample for training positive_fraction (float): fraction of positive (foreground) proposals to sample for training. 
proposal_matcher (Matcher): matcher that matches proposals and ground truth proposal_append_gt (bool): whether to include ground truth as proposals as well """ super().__init__() self.batch_size_per_image = batch_size_per_image self.positive_fraction = positive_fraction self.num_classes = num_classes self.proposal_matcher = proposal_matcher self.proposal_append_gt = proposal_append_gt @classmethod def from_config(cls, cfg): return { "batch_size_per_image": cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, "positive_fraction": cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION, "num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES, "proposal_append_gt": cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT, # Matcher to assign box proposals to gt boxes
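The cropped_code above filters training proposals for the keypoint head by keeping only those whose box contains at least one visible ground-truth keypoint. A stripped-down sketch of that check on plain tensors (no Instances wrapper); the names below are illustrative, not the repository's API.

import torch

def keep_proposals_with_visible_keypoints(boxes, keypoints):
    # boxes: (N, 4) proposal boxes in xyxy format; keypoints: (N, K, 3) rows of (x, y, visibility).
    vis = keypoints[:, :, 2] >= 1
    xs, ys = keypoints[:, :, 0], keypoints[:, :, 1]
    b = boxes.unsqueeze(1)                      # (N, 1, 4) so it broadcasts against (N, K)
    inside = (xs >= b[..., 0]) & (xs <= b[..., 2]) & (ys >= b[..., 1]) & (ys <= b[..., 3])
    keep = (inside & vis).any(dim=1)            # at least one visible keypoint inside the box
    return torch.nonzero(keep, as_tuple=True)[0]

# Proposal 0 contains one visible keypoint, proposal 1 contains none -> returns tensor([0]).
boxes = torch.tensor([[0., 0., 10., 10.], [20., 20., 30., 30.]])
kps = torch.zeros(2, 17, 3)
kps[0, 0] = torch.tensor([5., 5., 2.])
kept = keep_proposals_with_visible_keypoints(boxes, kps)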
# Copyright (c) Facebook, Inc. and its affiliates. ROI_HEADS_REGISTRY = Registry("ROI_HEADS") ROI_HEADS_REGISTRY.__doc__ = """ Registry for ROI heads in a generalized R-CNN model. ROIHeads take feature maps and region proposals, and perform per-region computation. The registered object will be called with `obj(cfg, input_shape)`. The call is expected to return an :class:`ROIHeads`. """ logger = logging.getLogger(__name__) def build_roi_heads(cfg, input_shape): """ Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`. """ name = cfg.MODEL.ROI_HEADS.NAME return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape) def select_foreground_proposals( proposals: List[Instances], bg_label: int ) -> Tuple[List[Instances], List[torch.Tensor]]: """ Given a list of N Instances (for N images), each containing a `gt_classes` field, return a list of Instances that contain only instances with `gt_classes != -1 && gt_classes != bg_label`. Args: proposals (list[Instances]): A list of N Instances, where N is the number of images in the batch. bg_label: label index of background class. Returns: list[Instances]: N Instances, each contains only the selected foreground instances. list[Tensor]: N boolean vector, correspond to the selection mask of each Instances object. True for selected instances. """ assert isinstance(proposals, (list, tuple)) assert isinstance(proposals[0], Instances) assert proposals[0].has("gt_classes") fg_proposals = [] fg_selection_masks = [] for proposals_per_image in proposals: gt_classes = proposals_per_image.gt_classes fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label) fg_idxs = fg_selection_mask.nonzero().squeeze(1) fg_proposals.append(proposals_per_image[fg_idxs]) fg_selection_masks.append(fg_selection_mask) return fg_proposals, fg_selection_masks def select_proposals_with_visible_keypoints(proposals: List[Instances]) -> List[Instances]: """ Args: proposals (list[Instances]): a list of N Instances, where N is the number of images. Returns: proposals: only contains proposals with at least one visible keypoint. Note that this is still slightly different from Detectron. In Detectron, proposals for training keypoint head are re-sampled from all the proposals with IOU>threshold & >=1 visible keypoint. Here, the proposals are first sampled from all proposals with IOU>threshold, then proposals with no visible keypoint are filtered out. This strategy seems to make no difference on Detectron and is easier to implement. """ ret = [] all_num_fg = [] for proposals_per_image in proposals: # If empty/unannotated image (hard negatives), skip filtering for train if len(proposals_per_image) == 0: ret.append(proposals_per_image) continue gt_keypoints = proposals_per_image.gt_keypoints.tensor # #fg x K x 3 vis_mask = gt_keypoints[:, :, 2] >= 1 xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1] proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1) # #fg x 1 x 4 kp_in_box = ( (xs >= proposal_boxes[:, :, 0]) & (xs <= proposal_boxes[:, :, 2]) & (ys >= proposal_boxes[:, :, 1]) & (ys <= proposal_boxes[:, :, 3]) ) selection = (kp_in_box & vis_mask).any(dim=1) selection_idxs = nonzero_tuple(selection)[0] all_num_fg.append(selection_idxs.numel()) ret.append(proposals_per_image[selection_idxs]) storage = get_event_storage() storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg)) return ret class ROIHeads(torch.nn.Module): """ ROIHeads perform all per-region computation in an R-CNN. It typically contains logic to 1. 
(in training only) match proposals with ground truth and sample them 2. crop the regions and extract per-region features using proposals 3. make per-region predictions with different heads It can have many variants, implemented as subclasses of this class. This base class contains the logic to match/sample proposals. But it is not necessary to inherit this class if the sampling logic is not needed. """ @configurable def __init__( self, *, num_classes, batch_size_per_image, positive_fraction, proposal_matcher, proposal_append_gt=True, ): """ NOTE: this interface is experimental. Args: num_classes (int): number of foreground classes (i.e. background is not included) batch_size_per_image (int): number of proposals to sample for training positive_fraction (float): fraction of positive (foreground) proposals to sample for training. proposal_matcher (Matcher): matcher that matches proposals and ground truth proposal_append_gt (bool): whether to include ground truth as proposals as well """ super().__init__() self.batch_size_per_image = batch_size_per_image self.positive_fraction = positive_fraction self.num_classes = num_classes self.proposal_matcher = proposal_matcher self.proposal_append_gt = proposal_append_gt @classmethod def from_config(cls, cfg): return { "batch_size_per_image": cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, "positive_fraction": cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION, "num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES, "proposal_append_gt": cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT, # Matcher to assign box proposals to gt boxes
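The ROIHeads docstring above describes sampling batch_size_per_image proposals per image with a fixed positive_fraction. A generic sketch of that sampling behaviour appears below; it is an illustration of the idea, not detectron2's subsample_labels implementation, and all names are placeholders.

import torch

def subsample_proposals(gt_classes, batch_size_per_image=512, positive_fraction=0.25, bg_label=80):
    # gt_classes: (N,) matched class per proposal; bg_label marks background, -1 marks ignored.
    positive = torch.nonzero((gt_classes != -1) & (gt_classes != bg_label), as_tuple=True)[0]
    negative = torch.nonzero(gt_classes == bg_label, as_tuple=True)[0]
    num_pos = min(int(batch_size_per_image * positive_fraction), positive.numel())
    num_neg = min(batch_size_per_image - num_pos, negative.numel())
    pos_idx = positive[torch.randperm(positive.numel())[:num_pos]]
    neg_idx = negative[torch.randperm(negative.numel())[:num_neg]]
    return pos_idx, neg_idx

# Example: build a 512-proposal minibatch with at most 25% positives (shortfalls are
# filled only as far as each pool allows).
gt_classes = torch.randint(0, 81, (2000,))
pos, neg = subsample_proposals(gt_classes)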
"proposal_matcher": Matcher(
11
2023-12-10 20:14:00+00:00
24k
mkang315/ASF-YOLO
segment/val.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # ONNX Runtime: *.onnx\n # ONNX OpenCV DNN: *.onnx --dnn\n # OpenVINO: *_openvino_model\n # CoreML: *.mlmodel\n # TensorRT: *.engine\n # TensorFlow SavedModel: *_saved_model\n # TensorFlow GraphDef: *.pb\n # TensorFlow Lite: *.tflite\n # TensorFlow Edge TPU: *_edgetpu.tflite\n # PaddlePaddle: *_paddle_model\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)\n fp16 &= pt or jit or onnx or engine # FP16\n nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)\n stride = 32 # default stride\n cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA\n if not (pt or triton):\n w = attempt_download(w) # download if not local\n\n if pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)\n stride = max(int(model.stride.max()), 32) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n model.half() if fp16 else model.float()\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files, map_location=device)\n model.half() if fp16 else model.float()\n if extra_files['config.txt']: # load metadata dict\n d = json.loads(extra_files['config.txt'],\n object_hook=lambda d: {int(k) if k.isdigit() else k: v\n for k, v in d.items()})\n stride, names = int(d['stride']), d['names']\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements('opencv-python>=4.5.4')\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n output_names = [x.name for x in session.get_outputs()]\n meta = session.get_modelmeta().custom_metadata_map # metadata\n if 'stride' in meta:\n stride, names = int(meta['stride']), eval(meta['names'])\n elif xml: # OpenVINO\n LOGGER.info(f'Loading {w} for OpenVINO inference...')\n check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/\n from openvino.runtime import Core, Layout, get_batch\n ie = Core()\n if not Path(w).is_file(): # if not *.xml\n w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir\n network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))\n if network.get_parameters()[0].get_layout().empty:\n network.get_parameters()[0].set_layout(Layout(\"NCHW\"))\n batch_dim = get_batch(network)\n if batch_dim.is_static:\n batch_size = batch_dim.get_length()\n 
executable_network = ie.compile_model(network, device_name=\"CPU\") # device_name=\"MYRIAD\" for Intel NCS2\n stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0\n if device.type == 'cpu':\n device = torch.device('cuda:0')\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n context = model.create_execution_context()\n bindings = OrderedDict()\n output_names = []\n fp16 = False # default updated below\n dynamic = False\n for i in range(model.num_bindings):\n name = model.get_binding_name(i)\n dtype = trt.nptype(model.get_binding_dtype(i))\n if model.binding_is_input(i):\n if -1 in tuple(model.get_binding_shape(i)): # dynamic\n dynamic = True\n context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))\n if dtype == np.float16:\n fp16 = True\n else: # output\n output_names.append(name)\n shape = tuple(context.get_binding_shape(i))\n im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)\n bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif saved_model: # TF SavedModel\n LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')\n import tensorflow as tf\n keras = False # assume TF1 saved_model\n model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)\n elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), []) # wrapped\n ge = x.graph.as_graph_element\n return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))\n\n def gd_outputs(gd):\n name_list, input_list = [], []\n for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef\n name_list.append(node.name)\n input_list.extend(node.input)\n return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))\n\n gd = tf.Graph().as_graph_def() # TF GraphDef\n with open(w, 'rb') as f:\n gd.ParseFromString(f.read())\n frozen_func = wrap_frozen_graph(gd, inputs=\"x:0\", outputs=gd_outputs(gd))\n elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu\n from tflite_runtime.interpreter import Interpreter, load_delegate\n except ImportError:\n import tensorflow as tf\n Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,\n if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n delegate = {\n 'Linux': 'libedgetpu.so.1',\n 'Darwin': 'libedgetpu.1.dylib',\n 'Windows': 
'edgetpu.dll'}[platform.system()]\n interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])\n else: # TFLite\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n interpreter = Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n # load metadata\n with contextlib.suppress(zipfile.BadZipFile):\n with zipfile.ZipFile(w, \"r\") as model:\n meta_file = model.namelist()[0]\n meta = ast.literal_eval(model.read(meta_file).decode(\"utf-8\"))\n stride, names = int(meta['stride']), meta['names']\n elif tfjs: # TF.js\n raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')\n elif paddle: # PaddlePaddle\n LOGGER.info(f'Loading {w} for PaddlePaddle inference...')\n check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')\n import paddle.inference as pdi\n if not Path(w).is_file(): # if not *.pdmodel\n w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir\n weights = Path(w).with_suffix('.pdiparams')\n config = pdi.Config(str(w), str(weights))\n if cuda:\n config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)\n predictor = pdi.create_predictor(config)\n input_handle = predictor.get_input_handle(predictor.get_input_names()[0])\n output_names = predictor.get_output_names()\n elif triton: # NVIDIA Triton Inference Server\n LOGGER.info(f'Using {w} as Triton Inference Server...')\n check_requirements('tritonclient[all]')\n from utils.triton import TritonRemoteModel\n model = TritonRemoteModel(url=w)\n nhwc = model.runtime.startswith(\"tensorflow\")\n else:\n raise NotImplementedError(f'ERROR: {w} is not a supported format')\n\n # class names\n if 'names' not in locals():\n names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}\n if names[0] == 'n01440764' and len(names) == 1000: # ImageNet\n names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names\n\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.fp16 and im.dtype != torch.float16:\n im = im.half() # to FP16\n if self.nhwc:\n im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)\n\n if self.pt: # PyTorch\n y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)\n elif self.jit: # TorchScript\n y = self.model(im)\n elif self.dnn: # ONNX OpenCV DNN\n im = im.cpu().numpy() # torch to numpy\n self.net.setInput(im)\n y = self.net.forward()\n elif self.onnx: # ONNX Runtime\n im = im.cpu().numpy() # torch to numpy\n y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})\n elif self.xml: # OpenVINO\n im = im.cpu().numpy() # FP32\n y = list(self.executable_network([im]).values())\n elif self.engine: # TensorRT\n if self.dynamic and im.shape != self.bindings['images'].shape:\n i = self.model.get_binding_index('images')\n self.context.set_binding_shape(i, im.shape) # reshape if dynamic\n self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)\n for name in self.output_names:\n i = self.model.get_binding_index(name)\n self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))\n s = self.bindings['images'].shape\n assert im.shape == s, f\"input size 
{im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}\"\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = [self.bindings[x].data for x in sorted(self.output_names)]\n elif self.coreml: # CoreML\n im = im.cpu().numpy()\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.ANTIALIAS)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n if 'confidence' in y:\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n else:\n y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)\n elif self.paddle: # PaddlePaddle\n im = im.cpu().numpy().astype(np.float32)\n self.input_handle.copy_from_cpu(im)\n self.predictor.run()\n y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]\n elif self.triton: # NVIDIA Triton Inference Server\n y = self.model(im)\n else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n im = im.cpu().numpy()\n if self.saved_model: # SavedModel\n y = self.model(im, training=False) if self.keras else self.model(im)\n elif self.pb: # GraphDef\n y = self.frozen_func(x=self.tf.constant(im))\n else: # Lite or Edge TPU\n input = self.input_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = []\n for output in self.output_details:\n x = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n x = (x.astype(np.float32) - zero_point) * scale # re-scale\n y.append(x)\n y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]\n y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels\n\n if isinstance(y, (list, tuple)):\n return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]\n else:\n return self.from_numpy(y)\n\n def from_numpy(self, x):\n return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x\n\n def warmup(self, imgsz=(1, 3, 640, 640)):\n # Warmup model by running inference once\n warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton\n if any(warmup_types) and (self.device.type != 'cpu' or self.triton):\n im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input\n for _ in range(2 if self.jit else 1): #\n self.forward(im) # warmup\n\n @staticmethod\n def _model_type(p='path/to/model.pt'):\n # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx\n # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]\n from export import export_formats\n from utils.downloads import is_url\n sf = list(export_formats().Suffix) # export suffixes\n if not is_url(p, check=False):\n check_suffix(p, sf) # checks\n url = urlparse(p) # if url may be Triton inference server\n types = [s in Path(p).name for s in sf]\n types[8] &= not types[9] # tflite &= not edgetpu\n triton = not any(types) and all([any(s in url.scheme for s in [\"http\", \"grpc\"]), url.netloc])\n return types + [triton]\n\n @staticmethod\n def _load_metadata(f=Path('path/to/meta.yaml')):\n # Load metadata from meta.yaml if it exists\n if f.exists():\n d = yaml_load(f)\n return d['stride'], d['names'] # assign stride, names\n return None, None" }, { "identifier": "SegmentationModel", "path": "models/yolo.py", "snippet": "class SegmentationModel(DetectionModel):\n # YOLOv5 segmentation model\n def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):\n super().__init__(cfg, ch, nc, anchors)" }, { "identifier": "Callbacks", "path": "utils/callbacks.py", "snippet": "class Callbacks:\n \"\"\"\"\n Handles all registered callbacks for YOLOv5 Hooks\n \"\"\"\n\n def __init__(self):\n # Define the available callbacks\n self._callbacks = {\n 'on_pretrain_routine_start': [],\n 'on_pretrain_routine_end': [],\n 'on_train_start': [],\n 'on_train_epoch_start': [],\n 'on_train_batch_start': [],\n 'optimizer_step': [],\n 'on_before_zero_grad': [],\n 'on_train_batch_end': [],\n 'on_train_epoch_end': [],\n 'on_val_start': [],\n 'on_val_batch_start': [],\n 'on_val_image_end': [],\n 'on_val_batch_end': [],\n 'on_val_end': [],\n 'on_fit_epoch_end': [], # fit = train + val\n 'on_model_save': [],\n 'on_train_end': [],\n 'on_params_update': [],\n 'teardown': [],}\n self.stop_training = False # set True to interrupt training\n\n def register_action(self, hook, name='', callback=None):\n \"\"\"\n Register a new action to a callback hook\n\n Args:\n hook: The callback hook name to register the action to\n name: The name of the action for later reference\n callback: The callback to fire\n \"\"\"\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n assert callable(callback), f\"callback '{callback}' is not callable\"\n self._callbacks[hook].append({'name': name, 'callback': callback})\n\n def get_registered_actions(self, hook=None):\n \"\"\"\"\n Returns all the registered actions by callback hook\n\n Args:\n hook: The name of the hook to check, defaults to all\n \"\"\"\n return self._callbacks[hook] if hook else self._callbacks\n\n def run(self, hook, *args, thread=False, **kwargs):\n \"\"\"\n Loop through the registered actions and fire all callbacks on main thread\n\n Args:\n hook: The name of the hook to check, defaults to all\n args: Arguments to receive from YOLOv5\n thread: (boolean) Run callbacks in daemon thread\n kwargs: Keyword Arguments to receive from YOLOv5\n \"\"\"\n\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n for logger in self._callbacks[hook]:\n if thread:\n threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()\n else:\n logger['callback'](*args, **kwargs)" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "NUM_THREADS", "path": 
"utils/general.py", "snippet": "NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads" }, { "identifier": "TQDM_BAR_FORMAT", "path": "utils/general.py", "snippet": "TQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format" }, { "identifier": "Profile", "path": "utils/general.py", "snippet": "class Profile(contextlib.ContextDecorator):\n # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager\n def __init__(self, t=0.0):\n self.t = t\n self.cuda = torch.cuda.is_available()\n\n def __enter__(self):\n self.start = self.time()\n return self\n\n def __exit__(self, type, value, traceback):\n self.dt = self.time() - self.start # delta-time\n self.t += self.dt # accumulate dt\n\n def time(self):\n if self.cuda:\n torch.cuda.synchronize()\n return time.time()" }, { "identifier": "check_dataset", "path": "utils/general.py", "snippet": "def check_dataset(data, autodownload=True):\n # Download, check and/or unzip dataset if not found locally\n\n # Download (optional)\n extract_dir = ''\n if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)):\n download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1)\n data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml'))\n extract_dir, autodownload = data.parent, False\n\n # Read yaml (optional)\n if isinstance(data, (str, Path)):\n data = yaml_load(data) # dictionary\n\n # Checks\n for k in 'train', 'val', 'names':\n assert k in data, emojis(f\"data.yaml '{k}:' field missing ❌\")\n if isinstance(data['names'], (list, tuple)): # old array format\n data['names'] = dict(enumerate(data['names'])) # convert to dict\n assert all(isinstance(k, int) for k in data['names'].keys()), 'data.yaml names keys must be integers, i.e. 
2: car'\n data['nc'] = len(data['names'])\n\n # Resolve paths\n path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.'\n if not path.is_absolute():\n path = (ROOT / path).resolve()\n data['path'] = path # download scripts\n for k in 'train', 'val', 'test':\n if data.get(k): # prepend path\n if isinstance(data[k], str):\n x = (path / data[k]).resolve()\n if not x.exists() and data[k].startswith('../'):\n x = (path / data[k][3:]).resolve()\n data[k] = str(x)\n else:\n data[k] = [str((path / x).resolve()) for x in data[k]]\n\n # Parse yaml\n train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))\n if val:\n val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path\n if not all(x.exists() for x in val):\n LOGGER.info('\\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])\n if not s or not autodownload:\n raise Exception('Dataset not found ❌')\n t = time.time()\n if s.startswith('http') and s.endswith('.zip'): # URL\n f = Path(s).name # filename\n LOGGER.info(f'Downloading {s} to {f}...')\n torch.hub.download_url_to_file(s, f)\n Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root\n unzip_file(f, path=DATASETS_DIR) # unzip\n Path(f).unlink() # remove zip\n r = None # success\n elif s.startswith('bash '): # bash script\n LOGGER.info(f'Running {s} ...')\n r = os.system(s)\n else: # python script\n r = exec(s, {'yaml': data}) # return None\n dt = f'({round(time.time() - t, 1)}s)'\n s = f\"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}\" if r in (0, None) else f\"failure {dt} ❌\"\n LOGGER.info(f\"Dataset download {s}\")\n check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts\n return data # dictionary" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(imgsz, s=32, floor=0):\n # Verify image size is a multiple of stride s in each dimension\n if isinstance(imgsz, int): # integer i.e. img_size=640\n new_size = max(make_divisible(imgsz, int(s)), floor)\n else: # list i.e. 
img_size=[640, 480]\n imgsz = list(imgsz) # convert to list if tuple\n new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n if new_size != imgsz:\n LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n return new_size" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "@TryExcept()\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''):\n # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, Path): # requirements.txt file\n file = requirements.resolve()\n assert file.exists(), f\"{prefix} {file} not found, check failed.\"\n with file.open() as f:\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]\n elif isinstance(requirements, str):\n requirements = [requirements]\n\n s = ''\n n = 0\n for r in requirements:\n try:\n pkg.require(r)\n except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met\n s += f'\"{r}\" '\n n += 1\n\n if s and install and AUTOINSTALL: # check environment variable\n LOGGER.info(f\"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...\")\n try:\n # assert check_online(), \"AutoUpdate skipped (offline)\"\n LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode())\n source = file if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n LOGGER.info(s)\n except Exception as e:\n LOGGER.warning(f'{prefix} ❌ {e}')" }, { "identifier": "check_yaml", "path": "utils/general.py", "snippet": "def check_yaml(file, suffix=('.yaml', '.yml')):\n # Search/download YAML file (if necessary) and return path, checking suffix\n return check_file(file, suffix)" }, { "identifier": "coco80_to_coco91_class", "path": "utils/general.py", "snippet": "def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\n # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/\n # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\\n')\n # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\\n')\n # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco\n # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet\n return [\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {\n 'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=False, sep='', mkdir=False):\n # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.\n path = Path(path) # os-agnostic\n if path.exists() and not exist_ok:\n path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')\n\n # Method 1\n for n in range(2, 9999):\n p = f'{path}{sep}{n}{suffix}' # increment path\n if not os.path.exists(p): #\n break\n path = Path(p)\n\n # Method 2 (deprecated)\n # dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n # matches = [re.search(rf\"{path.stem}{sep}(\\d+)\", d) for d in dirs]\n # i = [int(m.groups()[0]) for m in matches if m] # indices\n # n = max(i) + 1 if i else 2 # increment number\n # path = Path(f\"{path}{sep}{n}{suffix}\") # increment path\n\n if mkdir:\n path.mkdir(parents=True, exist_ok=True) # make directory\n\n return path" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nm=0, # number of masks\n):\n \"\"\"Non-Maximum Suppression (NMS) on inference results to reject overlapping detections\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out)\n prediction = prediction[0] # select only inference output\n\n device = prediction.device\n mps = 'mps' in device.type # Apple MPS\n if mps: # MPS not fully supported yet, convert tensors to CPU before NMS\n prediction = prediction.cpu()\n bs = prediction.shape[0] # batch size\n nc = prediction.shape[2] - nm - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n # min_wh = 2 # (pixels) minimum box width and height\n max_wh = 7680 # (pixels) maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 0.5 + 0.05 * bs # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n mi = 5 + nc # mask start index\n output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | 
(x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n lb = labels[xi]\n v = torch.zeros((len(lb), nc + nm + 5), device=x.device)\n v[:, :4] = lb[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box/Mask\n box = xywh2xyxy(x[:, :4]) # center_x, center_y, width, height) to (x1, y1, x2, y2)\n mask = x[:, mi:] # zero columns if no masks\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1)\n else: # best class only\n conf, j = x[:, 5:mi].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n else:\n x = x[x[:, 4].argsort(descending=True)] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n #i = my_soft_nms(boxes, scores, iou_thres) \n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if mps:\n output[xi] = output[xi].to(device)\n if (time.time() - t) > time_limit:\n LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(args: Optional[dict] = None, show_file=True, show_func=False):\n # Print function arguments (optional args dict)\n x = inspect.currentframe().f_back # previous frame\n file, _, func, _, _ = inspect.getframeinfo(x)\n if args is None: # get args automatically\n args, _, _, frm = inspect.getargvalues(x)\n args = {k: v for k, v in frm.items() if k in args}\n try:\n file = Path(file).resolve().relative_to(ROOT).with_suffix('')\n except ValueError:\n file = Path(file).stem\n s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')\n LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))" }, { "identifier": "scale_boxes", "path": "utils/general.py", "snippet": "def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):\n # Rescale boxes (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] 
- img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n boxes[:, [0, 2]] -= pad[0] # x padding\n boxes[:, [1, 3]] -= pad[1] # y padding\n boxes[:, :4] /= gain\n clip_boxes(boxes, img0_shape)\n return boxes" }, { "identifier": "xywh2xyxy", "path": "utils/general.py", "snippet": "def xywh2xyxy(x):\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y\n return y" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "ConfusionMatrix", "path": "utils/metrics.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc, conf=0.25, iou_thres=0.45):\n self.matrix = np.zeros((nc + 1, nc + 1))\n self.nc = nc # number of classes\n self.conf = conf\n self.iou_thres = iou_thres\n\n def process_batch(self, detections, labels):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n labels (Array[M, 5]), class, x1, y1, x2, y2\n Returns:\n None, updates confusion matrix accordingly\n \"\"\"\n if detections is None:\n gt_classes = labels.int()\n for gc in gt_classes:\n self.matrix[self.nc, gc] += 1 # background FN\n return\n\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(int)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[detection_classes[m1[j]], gc] += 1 # correct\n else:\n self.matrix[self.nc, gc] += 1 # true background\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[dc, self.nc] += 1 # predicted background\n\n def tp_fp(self):\n tp = self.matrix.diagonal() # true positives\n fp = self.matrix.sum(1) - tp # false positives\n # fn = self.matrix.sum(0) - tp # false negatives (missed detections)\n return tp[:-1], fp[:-1] # remove background class\n\n @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure')\n def plot(self, normalize=True, save_dir='', names=()):\n import seaborn as sn\n\n array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns\n array[array < 0.005] = 
np.nan # don't annotate (would appear as 0.00)\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)\n nc, nn = self.nc, len(names) # number of classes, names\n sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size\n labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels\n ticklabels = (names + ['background']) if labels else \"auto\"\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered\n sn.heatmap(array,\n ax=ax,\n annot=nc < 30,\n annot_kws={\n \"size\": 8},\n cmap='Blues',\n fmt='.2f',\n square=True,\n vmin=0.0,\n xticklabels=ticklabels,\n yticklabels=ticklabels).set_facecolor((1, 1, 1))\n ax.set_ylabel('True')\n ax.set_ylabel('Predicted')\n ax.set_title('Confusion Matrix')\n fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n plt.close(fig)\n\n def print(self):\n for i in range(self.nc + 1):\n print(' '.join(map(str, self.matrix[i])))" }, { "identifier": "box_iou", "path": "utils/metrics.py", "snippet": "def box_iou(box1, box2, eps=1e-7):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)\n inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)\n\n # IoU = inter / (area1 + area2 - inter)\n return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)" }, { "identifier": "output_to_target", "path": "utils/plots.py", "snippet": "def output_to_target(output, max_det=300):\n # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting\n targets = []\n for i, o in enumerate(output):\n box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1)\n j = torch.full((conf.shape[0], 1), i)\n targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1))\n return torch.cat(targets, 0).numpy()" }, { "identifier": "plot_val_study", "path": "utils/plots.py", "snippet": "def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\n # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)\n save_dir = Path(file).parent if file else Path(dir)\n plot2 = False # plot additional results\n if plot2:\n ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()\n\n fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)\n # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:\n for f in sorted(save_dir.glob('study*.txt')):\n y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T\n x = np.arange(y.shape[1]) if x is None else np.array(x)\n if plot2:\n s = ['P', 'R', '[email protected]', '[email protected]:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']\n for i in range(7):\n ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)\n ax[i].set_title(s[i])\n\n j = y[3].argmax() + 1\n ax2.plot(y[5, 1:j],\n y[3, 1:j] * 1E2,\n '.-',\n linewidth=2,\n markersize=8,\n label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))\n\n ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 
40.5, 43.0, 47.5, 49.7, 51.5],\n 'k.-',\n linewidth=2,\n markersize=8,\n alpha=.25,\n label='EfficientDet')\n\n ax2.grid(alpha=0.2)\n ax2.set_yticks(np.arange(20, 60, 5))\n ax2.set_xlim(0, 57)\n ax2.set_ylim(25, 55)\n ax2.set_xlabel('GPU Speed (ms/img)')\n ax2.set_ylabel('COCO AP val')\n ax2.legend(loc='lower right')\n f = save_dir / 'study.png'\n print(f'Saving {f}...')\n plt.savefig(f, dpi=300)" }, { "identifier": "create_dataloader", "path": "utils/segment/dataloaders.py", "snippet": "def create_dataloader(path,\n imgsz,\n batch_size,\n stride,\n single_cls=False,\n hyp=None,\n augment=False,\n cache=False,\n pad=0.0,\n rect=False,\n rank=-1,\n workers=8,\n image_weights=False,\n quad=False,\n prefix='',\n shuffle=False,\n mask_downsample_ratio=1,\n overlap_mask=False):\n if rect and shuffle:\n LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')\n shuffle = False\n with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP\n dataset = LoadImagesAndLabelsAndMasks(\n path,\n imgsz,\n batch_size,\n augment=augment, # augmentation\n hyp=hyp, # hyperparameters\n rect=rect, # rectangular batches\n cache_images=cache,\n single_cls=single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix,\n downsample_ratio=mask_downsample_ratio,\n overlap=overlap_mask)\n\n batch_size = min(batch_size, len(dataset))\n nd = torch.cuda.device_count() # number of CUDA devices\n nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)\n loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates\n generator = torch.Generator()\n generator.manual_seed(6148914691236517205 + RANK)\n return loader(\n dataset,\n batch_size=batch_size,\n shuffle=shuffle and sampler is None,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn,\n worker_init_fn=seed_worker,\n generator=generator,\n ), dataset" }, { "identifier": "mask_iou", "path": "utils/segment/general.py", "snippet": "def mask_iou(mask1, mask2, eps=1e-7):\n \"\"\"\n mask1: [N, n] m1 means number of predicted objects\n mask2: [M, n] m2 means number of gt objects\n Note: n means image_w x image_h\n\n return: masks iou, [N, M]\n \"\"\"\n intersection = torch.matmul(mask1, mask2.t()).clamp(0)\n union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection\n return intersection / (union + eps)" }, { "identifier": "process_mask", "path": "utils/segment/general.py", "snippet": "def process_mask(protos, masks_in, bboxes, shape, upsample=False):\n \"\"\"\n Crop before upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n ih, iw = shape\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW\n\n downsampled_bboxes = bboxes.clone()\n downsampled_bboxes[:, 0] *= mw / iw\n downsampled_bboxes[:, 2] *= mw / iw\n downsampled_bboxes[:, 3] *= mh / ih\n downsampled_bboxes[:, 1] *= mh / ih\n\n masks = crop_mask(masks, downsampled_bboxes) # CHW\n if upsample:\n masks = F.interpolate(masks[None], shape, mode='bilinear', 
align_corners=False)[0] # CHW\n return masks.gt_(0.5)" }, { "identifier": "process_mask_upsample", "path": "utils/segment/general.py", "snippet": "def process_mask_upsample(protos, masks_in, bboxes, shape):\n \"\"\"\n Crop after upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n masks = crop_mask(masks, bboxes) # CHW\n return masks.gt_(0.5)" }, { "identifier": "scale_image", "path": "utils/segment/general.py", "snippet": "def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):\n \"\"\"\n img1_shape: model input shape, [h, w]\n img0_shape: origin pic shape, [h, w, 3]\n masks: [h, w, num]\n \"\"\"\n # Rescale coordinates (xyxy) from im1_shape to im0_shape\n if ratio_pad is None: # calculate from im0_shape\n gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new\n pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding\n else:\n pad = ratio_pad[1]\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])\n\n if len(masks.shape) < 2:\n raise ValueError(f'\"len of masks shape\" should be 2 or 3, but got {len(masks.shape)}')\n masks = masks[top:bottom, left:right]\n # masks = masks.permute(2, 0, 1).contiguous()\n # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]\n # masks = masks.permute(1, 2, 0).contiguous()\n masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))\n\n if len(masks.shape) == 2:\n masks = masks[:, :, None]\n return masks" }, { "identifier": "Metrics", "path": "utils/segment/metrics.py", "snippet": "class Metrics:\n \"\"\"Metric for boxes and masks.\"\"\"\n\n def __init__(self) -> None:\n self.metric_box = Metric()\n self.metric_mask = Metric()\n\n def update(self, results):\n \"\"\"\n Args:\n results: Dict{'boxes': Dict{}, 'masks': Dict{}}\n \"\"\"\n self.metric_box.update(list(results[\"boxes\"].values()))\n self.metric_mask.update(list(results[\"masks\"].values()))\n\n def mean_results(self):\n return self.metric_box.mean_results() + self.metric_mask.mean_results()\n\n def class_result(self, i):\n return self.metric_box.class_result(i) + self.metric_mask.class_result(i)\n\n def get_maps(self, nc):\n return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)\n\n @property\n def ap_class_index(self):\n # boxes and masks have the same ap_class_index\n return self.metric_box.ap_class_index" }, { "identifier": "ap_per_class_box_and_mask", "path": "utils/segment/metrics.py", "snippet": "def ap_per_class_box_and_mask(\n tp_m,\n tp_b,\n conf,\n pred_cls,\n target_cls,\n plot=False,\n save_dir=\".\",\n names=(),\n):\n \"\"\"\n Args:\n tp_b: tp of boxes.\n tp_m: tp of masks.\n other arguments see `func: ap_per_class`.\n \"\"\"\n results_boxes = ap_per_class(tp_b,\n conf,\n pred_cls,\n target_cls,\n plot=plot,\n save_dir=save_dir,\n names=names,\n prefix=\"Box\")[2:]\n results_masks = ap_per_class(tp_m,\n conf,\n pred_cls,\n target_cls,\n plot=plot,\n save_dir=save_dir,\n names=names,\n prefix=\"Mask\")[2:]\n\n results = {\n \"boxes\": {\n \"p\": results_boxes[0],\n \"r\": results_boxes[1],\n \"ap\": results_boxes[3],\n 
\"f1\": results_boxes[2],\n \"ap_class\": results_boxes[4]},\n \"masks\": {\n \"p\": results_masks[0],\n \"r\": results_masks[1],\n \"ap\": results_masks[3],\n \"f1\": results_masks[2],\n \"ap_class\": results_masks[4]}}\n return results" }, { "identifier": "plot_images_and_masks", "path": "utils/segment/plots.py", "snippet": "@threaded\ndef plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None):\n # Plot image grid with labels\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n if isinstance(masks, torch.Tensor):\n masks = masks.cpu().numpy().astype(int)\n\n max_size = 1920 # max image size\n max_subplots = 16 # max image subplots, i.e. 4x4\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n if np.max(images[0]) <= 1:\n images *= 255 # de-normalise (optional)\n\n # Build Image\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, im in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n im = im.transpose(1, 2, 0)\n mosaic[y:y + h, x:x + w, :] = im\n\n # Resize (optional)\n scale = max_size / ns / max(h, w)\n if scale < 1:\n h = math.ceil(scale * h)\n w = math.ceil(scale * w)\n mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))\n\n # Annotate\n fs = int((h + w) * ns * 0.01) # font size\n annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)\n for i in range(i + 1):\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders\n if paths:\n annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames\n if len(targets) > 0:\n idx = targets[:, 0] == i\n ti = targets[idx] # image targets\n\n boxes = xywh2xyxy(ti[:, 2:6]).T\n classes = ti[:, 1].astype('int')\n labels = ti.shape[1] == 6 # labels if no conf column\n conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale < 1: # absolute coords need scale if image scales\n boxes *= scale\n boxes[[0, 2]] += x\n boxes[[1, 3]] += y\n for j, box in enumerate(boxes.T.tolist()):\n cls = classes[j]\n color = colors(cls)\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'\n annotator.box_label(box, label, color=color)\n\n # Plot masks\n if len(masks):\n if masks.max() > 1.0: # mean that masks are overlap\n image_masks = masks[[i]] # (1, 640, 640)\n nl = len(ti)\n index = np.arange(nl).reshape(nl, 1, 1) + 1\n image_masks = np.repeat(image_masks, nl, axis=0)\n image_masks = np.where(image_masks == index, 1.0, 0.0)\n else:\n image_masks = masks[idx]\n\n im = np.asarray(annotator.im).copy()\n for j, box in enumerate(boxes.T.tolist()):\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n color = colors(classes[j])\n mh, mw = image_masks[j].shape\n if mh != h or mw != w:\n mask = image_masks[j].astype(np.uint8)\n mask = cv2.resize(mask, (w, h))\n mask = mask.astype(bool)\n else:\n mask = image_masks[j].astype(bool)\n 
with contextlib.suppress(Exception):\n im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6\n annotator.fromarray(im)\n annotator.im.save(fname) # save" }, { "identifier": "de_parallel", "path": "utils/torch_utils.py", "snippet": "def de_parallel(model):\n # De-parallelize a model: returns single-GPU model if model is of type DP or DDP\n return model.module if is_parallel(model) else model" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "smart_inference_mode", "path": "utils/torch_utils.py", "snippet": "def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):\n # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator\n def decorate(fn):\n return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)\n\n return decorate" } ]
import argparse
import json
import os
import sys
import time
from multiprocessing.pool import ThreadPool
from pathlib import Path

import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm

from models.common import DetectMultiBackend
from models.yolo import SegmentationModel
from utils.callbacks import Callbacks
from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
                           non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, box_iou
from utils.plots import output_to_target, plot_val_study
from utils.segment.dataloaders import create_dataloader
from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image
from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
from utils.segment.plots import plot_images_and_masks
from utils.torch_utils import de_parallel, select_device, smart_inference_mode
from pycocotools.mask import encode
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
20,150
x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements(['pycocotools']) process = process_mask_upsample # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') # Data data = check_dataset(data) # check # Configure model.eval() cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for [email protected]:0.95 niou = iouv.numel() # Dataloader if not training: 
if pt and not single_cls: # check --weights are trained on --data ncm = model.model.nc assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, workers=workers, prefix=colorstr(f'{task}: '), overlap_mask=overlap, mask_downsample_ratio=mask_downsample_ratio)[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = model.names if hasattr(model, 'names') else model.module.names # get class names if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", "mAP50", "mAP50-95)") dt = Profile(), Profile(), Profile()
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset Usage: $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments Usage - formats: $ python segment/val.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_label # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') def save_one_json(predn, jdict, path, class_map, pred_masks): # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} def single_encode(x): rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] rle["counts"] = rle["counts"].decode("utf-8") return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner pred_masks = np.transpose(pred_masks, (2, 0, 1)) with ThreadPool(NUM_THREADS) as pool: rles = pool.map(single_encode, pred_masks) for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5), 'segmentation': rles[i]}) def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = 
matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements(['pycocotools']) process = process_mask_upsample # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') # Data data = check_dataset(data) # check # Configure model.eval() cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for [email protected]:0.95 niou = iouv.numel() # Dataloader if not training: if pt and not single_cls: # check --weights are trained on --data ncm = model.model.nc assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). 
Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, workers=workers, prefix=colorstr(f'{task}: '), overlap_mask=overlap, mask_downsample_ratio=mask_downsample_ratio)[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = model.names if hasattr(model, 'names') else model.module.names # get class names if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", "mAP50", "mAP50-95)") dt = Profile(), Profile(), Profile()
metrics = Metrics()
28
2023-12-10 14:18:29+00:00
24k
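The record above closes here: its target next_line is `metrics = Metrics()`, its gold_snippet_index is 28, and its level is 24k. As a rough illustration of how a record with this field layout (context snippets, import_statement, cropped_code, next_line, gold_snippet_index) could be consumed, the following is a minimal Python sketch, not something this dump documents: the prompt format, the exact-match check, and the names build_prompt, exact_match, and the complete callable are all assumptions of the sketch.

# Minimal sketch (assumption, not part of the dataset): turn one record into a
# next-line completion check using the fields shown above.
from typing import Any, Callable, Dict

def build_prompt(record: Dict[str, Any]) -> str:
    # The gold retrieved snippet is the context entry pointed to by gold_snippet_index.
    gold = record["context"][record["gold_snippet_index"]]["snippet"]
    # Prompt = retrieved definition + the file's imports + the code cropped just
    # before the line to be predicted.
    return f"# Retrieved context:\n{gold}\n\n{record['import_statement']}\n\n{record['cropped_code']}\n"

def exact_match(record: Dict[str, Any], complete: Callable[[str], str]) -> bool:
    # `complete` stands for any model call that returns a one-line completion for a prompt.
    prediction = complete(build_prompt(record)).strip()
    return prediction == record["next_line"].strip()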
youngskkim/CRN
exps/base_exp.py
[ { "identifier": "NuscDatasetRadarDet", "path": "datasets/nusc_det_dataset.py", "snippet": "class NuscDatasetRadarDet(Dataset):\n def __init__(self,\n ida_aug_conf,\n bda_aug_conf,\n rda_aug_conf,\n classes,\n data_root,\n info_paths,\n is_train,\n load_interval=1,\n num_sweeps=1,\n img_conf=dict(img_mean=[123.675, 116.28, 103.53],\n img_std=[58.395, 57.12, 57.375],\n to_rgb=True),\n img_backbone_conf=dict(\n x_bound=[-51.2, 51.2, 0.8],\n y_bound=[-51.2, 51.2, 0.8],\n z_bound=[-5, 3, 8],\n d_bound=[2.0, 58.0, 0.5]\n ),\n drop_aug_conf=None,\n return_image=True,\n return_depth=False,\n return_radar_pv=False,\n depth_path='depth_gt',\n radar_pv_path='radar_pv_filter',\n remove_z_axis=False,\n use_cbgs=False,\n gt_for_radar_only=False,\n sweep_idxes=list(),\n key_idxes=list()):\n \"\"\"Dataset used for bevdetection task.\n Args:\n ida_aug_conf (dict): Config for ida augmentation.\n bda_aug_conf (dict): Config for bda augmentation.\n classes (list): Class names.\n use_cbgs (bool): Whether to use cbgs strategy,\n Default: False.\n num_sweeps (int): Number of sweeps to be used for each sample.\n default: 1.\n img_conf (dict): Config for image.\n return_depth (bool): Whether to use depth gt.\n default: False.\n sweep_idxes (list): List of sweep idxes to be used.\n default: list().\n key_idxes (list): List of key idxes to be used.\n default: list().\n \"\"\"\n super().__init__()\n if isinstance(info_paths, list):\n self.infos = list()\n for info_path in info_paths:\n self.infos.extend(mmcv.load(info_path))\n else:\n self.infos = mmcv.load(info_paths)\n self.is_train = is_train\n self.ida_aug_conf = ida_aug_conf\n self.bda_aug_conf = bda_aug_conf\n self.rda_aug_conf = rda_aug_conf\n self.drop_aug_conf = drop_aug_conf\n self.data_root = data_root\n self.classes = classes\n self.use_cbgs = use_cbgs\n if self.use_cbgs:\n self.cat2id = {name: i for i, name in enumerate(self.classes)}\n self.sample_indices = self._get_sample_indices()\n self.num_sweeps = num_sweeps\n self.img_mean = np.array(img_conf['img_mean'], np.float32)\n self.img_std = np.array(img_conf['img_std'], np.float32)\n self.to_rgb = img_conf['to_rgb']\n self.img_backbone_conf = img_backbone_conf\n\n self.return_image = return_image\n self.return_depth = return_depth\n self.return_radar_pv = return_radar_pv\n\n self.remove_z_axis = remove_z_axis\n self.gt_for_radar_only = gt_for_radar_only\n\n assert sum([sweep_idx >= 0 for sweep_idx in sweep_idxes]) \\\n == len(sweep_idxes), 'All `sweep_idxes` must greater \\\n than or equal to 0.'\n\n self.sweeps_idx = sweep_idxes\n assert sum([key_idx < 0 for key_idx in key_idxes]) == len(key_idxes),\\\n 'All `key_idxes` must less than 0.'\n self.key_idxes = [0] + key_idxes\n if load_interval > 1:\n self.infos = self.infos[::load_interval]\n self.depth_path = depth_path\n self.radar_pv_path = radar_pv_path\n\n self.max_radar_points_pv = 1536\n self.max_distance_pv = self.img_backbone_conf['d_bound'][1]\n\n def _get_sample_indices(self):\n \"\"\"Load annotations from ann_file.\n\n Args:\n ann_file (str): Path of the annotation file.\n\n Returns:\n list[dict]: List of annotations after class sampling.\n \"\"\"\n class_sample_idxs = {cat_id: [] for cat_id in self.cat2id.values()}\n for idx, info in enumerate(self.infos):\n gt_names = set(\n [ann_info['category_name'] for ann_info in info['ann_infos']])\n for gt_name in gt_names:\n gt_name = map_name_from_general_to_detection[gt_name]\n if gt_name not in self.classes:\n continue\n class_sample_idxs[self.cat2id[gt_name]].append(idx)\n duplicated_samples = 
sum(\n [len(v) for _, v in class_sample_idxs.items()])\n class_distribution = {\n k: len(v) / duplicated_samples\n for k, v in class_sample_idxs.items()\n }\n\n sample_indices = []\n\n frac = 1.0 / len(self.classes)\n ratios = [frac / v for v in class_distribution.values()]\n for cls_inds, ratio in zip(list(class_sample_idxs.values()), ratios):\n sample_indices += np.random.choice(cls_inds,\n int(len(cls_inds) *\n ratio)).tolist()\n return sample_indices\n\n def sample_ida_augmentation(self):\n \"\"\"Generate ida augmentation values based on ida_config.\"\"\"\n H, W = self.ida_aug_conf['H'], self.ida_aug_conf['W']\n fH, fW = self.ida_aug_conf['final_dim']\n if self.is_train:\n resize = np.random.uniform(*self.ida_aug_conf['resize_lim'])\n resize_dims = (int(W * resize), int(H * resize))\n newW, newH = resize_dims\n crop_h = int(\n (1 - np.random.uniform(*self.ida_aug_conf['bot_pct_lim'])) *\n newH) - fH\n crop_w = int(np.random.uniform(0, max(0, newW - fW)))\n crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)\n flip = False\n if self.ida_aug_conf['rand_flip'] and np.random.choice([0, 1]):\n flip = True\n rotate_ida = np.random.uniform(*self.ida_aug_conf['rot_lim'])\n else:\n resize = max(fH / H, fW / W)\n resize_dims = (int(W * resize), int(H * resize))\n newW, newH = resize_dims\n crop_h = int(\n (1 - np.mean(self.ida_aug_conf['bot_pct_lim'])) * newH) - fH\n crop_w = int(max(0, newW - fW) / 2)\n crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)\n flip = False\n rotate_ida = 0\n return resize, resize_dims, crop, flip, rotate_ida\n\n def sample_bda_augmentation(self):\n \"\"\"Generate bda augmentation values based on bda_config.\"\"\"\n if self.is_train:\n if np.random.uniform() < self.bda_aug_conf['rot_ratio']:\n rotate_bda = np.random.uniform(*self.bda_aug_conf['rot_lim'])\n else:\n rotate_bda = 0\n scale_bda = np.random.uniform(*self.bda_aug_conf['scale_lim'])\n flip_dx = np.random.uniform() < self.bda_aug_conf['flip_dx_ratio']\n flip_dy = np.random.uniform() < self.bda_aug_conf['flip_dy_ratio']\n else:\n rotate_bda = 0\n scale_bda = 1.0\n flip_dx = False\n flip_dy = False\n return rotate_bda, scale_bda, flip_dx, flip_dy\n\n def sample_radar_augmentation(self):\n \"\"\"Generate bda augmentation values based on bda_config.\"\"\"\n if self.is_train:\n radar_idx = np.random.choice(self.rda_aug_conf['N_sweeps'],\n self.rda_aug_conf['N_use'],\n replace=False)\n else:\n radar_idx = np.arange(self.rda_aug_conf['N_sweeps'])\n return radar_idx\n\n def transform_radar_pv(self, points, resize, resize_dims, crop, flip, rotate, radar_idx):\n points = points[points[:, 2] < self.max_distance_pv, :]\n\n H, W = resize_dims\n points[:, :2] = points[:, :2] * resize\n points[:, 0] -= crop[0]\n points[:, 1] -= crop[1]\n if flip:\n points[:, 0] = resize_dims[1] - points[:, 0]\n\n points[:, 0] -= W / 2.0\n points[:, 1] -= H / 2.0\n\n h = rotate / 180 * np.pi\n rot_matrix = [\n [np.cos(h), np.sin(h)],\n [-np.sin(h), np.cos(h)],\n ]\n points[:, :2] = np.matmul(rot_matrix, points[:, :2].T).T\n\n points[:, 0] += W / 2.0\n points[:, 1] += H / 2.0\n\n depth_coords = points[:, :2].astype(np.int16)\n\n valid_mask = ((depth_coords[:, 1] < resize_dims[0])\n & (depth_coords[:, 0] < resize_dims[1])\n & (depth_coords[:, 1] >= 0)\n & (depth_coords[:, 0] >= 0))\n\n points = torch.Tensor(points[valid_mask])\n\n if self.remove_z_axis:\n points[:, 1] = 1. 
# dummy height value\n\n points_save = []\n for i in radar_idx:\n points_save.append(points[points[:, 6] == i])\n points = torch.cat(points_save, dim=0)\n\n # mean, std of rcs and speed are from train set\n points[:, 3] = (points[:, 3] - 4.783) / 7.576\n points[:, 4] = (torch.norm(points[:, 4:6], dim=1) - 0.677) / 1.976\n\n if self.is_train:\n drop_idx = np.random.uniform(size=points.shape[0]) # randomly drop points\n points = points[drop_idx > self.rda_aug_conf['drop_ratio']]\n\n num_points, num_feat = points.shape\n if num_points > self.max_radar_points_pv:\n choices = np.random.choice(num_points, self.max_radar_points_pv, replace=False)\n points = points[choices]\n else:\n num_append = self.max_radar_points_pv - num_points\n points = torch.cat([points, -999*torch.ones(num_append, num_feat)], dim=0)\n\n if num_points == 0:\n points[0, :] = points.new_tensor([0.1, 0.1, self.max_distance_pv-1, 0, 0, 0, 0])\n\n points[..., [0, 1, 2]] = points[..., [0, 2, 1]] # convert [w, h, d] to [w, d, h]\n\n return points[..., :5]\n\n def depth_transform(self, cam_depth, resize, resize_dims, crop, flip, rotate):\n \"\"\"Transform depth based on ida augmentation configuration.\n\n Args:\n cam_depth (np array): Nx3, 3: x,y,d.\n resize (float): Resize factor.\n resize_dims (tuple): Final dimension.\n crop (tuple): x1, y1, x2, y2\n flip (bool): Whether to flip.\n rotate (float): Rotation value.\n\n Returns:\n np array: [h/down_ratio, w/down_ratio, d]\n \"\"\"\n valid_depth = cam_depth[:, 2] < self.img_backbone_conf['d_bound'][1]\n cam_depth = cam_depth[valid_depth, :]\n\n H, W = resize_dims\n cam_depth[:, :2] = cam_depth[:, :2] * resize\n cam_depth[:, 0] -= crop[0]\n cam_depth[:, 1] -= crop[1]\n if flip:\n cam_depth[:, 0] = resize_dims[1] - cam_depth[:, 0]\n\n cam_depth[:, 0] -= W / 2.0\n cam_depth[:, 1] -= H / 2.0\n\n h = rotate / 180 * np.pi\n rot_matrix = [\n [np.cos(h), np.sin(h)],\n [-np.sin(h), np.cos(h)],\n ]\n cam_depth[:, :2] = np.matmul(rot_matrix, cam_depth[:, :2].T).T\n\n cam_depth[:, 0] += W / 2.0\n cam_depth[:, 1] += H / 2.0\n\n depth_coords = cam_depth[:, :2].astype(np.int16)\n\n depth_map = np.zeros(resize_dims)\n valid_mask = ((depth_coords[:, 1] < resize_dims[0])\n & (depth_coords[:, 0] < resize_dims[1])\n & (depth_coords[:, 1] >= 0)\n & (depth_coords[:, 0] >= 0))\n depth_map[depth_coords[valid_mask, 1],\n depth_coords[valid_mask, 0]] = cam_depth[valid_mask, 2]\n\n return torch.Tensor(depth_map)\n\n def get_image(self, cam_infos, cams):\n \"\"\"Given data and cam_names, return image data needed.\n\n Args:\n sweeps_data (list): Raw data used to generate the data we needed.\n cams (list): Camera names.\n\n Returns:\n Tensor: Image data after processing.\n Tensor: Transformation matrix from camera to ego.\n Tensor: Intrinsic matrix.\n Tensor: Transformation matrix for ida.\n Tensor: Transformation matrix from key\n frame camera to sweep frame camera.\n Tensor: timestamps.\n dict: meta infos needed for evaluation.\n \"\"\"\n assert len(cam_infos) > 0\n sweep_imgs = list()\n sweep_sensor2ego_mats = list()\n sweep_intrin_mats = list()\n sweep_ida_mats = list()\n sweep_sensor2sensor_mats = list()\n sweep_timestamps = list()\n sweep_gt_depths = list()\n sweep_radar_points = list()\n for cam in cams:\n imgs = list()\n sensor2ego_mats = list()\n intrin_mats = list()\n ida_mats = list()\n sensor2sensor_mats = list()\n timestamps = list()\n gt_depths = list()\n radar_points = list()\n key_info = cam_infos[0]\n resize, resize_dims, crop, flip, \\\n rotate_ida = self.sample_ida_augmentation()\n radar_idx 
= self.sample_radar_augmentation()\n\n for sweep_idx, cam_info in enumerate(cam_infos):\n img = Image.open(\n os.path.join(self.data_root, cam_info[cam]['filename']))\n\n w, x, y, z = cam_info[cam]['calibrated_sensor']['rotation']\n # sweep sensor to sweep ego\n sweepsensor2sweepego_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n sweepsensor2sweepego_tran = torch.Tensor(\n cam_info[cam]['calibrated_sensor']['translation'])\n sweepsensor2sweepego = sweepsensor2sweepego_rot.new_zeros(\n (4, 4))\n sweepsensor2sweepego[3, 3] = 1\n sweepsensor2sweepego[:3, :3] = sweepsensor2sweepego_rot\n sweepsensor2sweepego[:3, -1] = sweepsensor2sweepego_tran\n # sweep ego to global\n w, x, y, z = cam_info[cam]['ego_pose']['rotation']\n sweepego2global_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n sweepego2global_tran = torch.Tensor(\n cam_info[cam]['ego_pose']['translation'])\n sweepego2global = sweepego2global_rot.new_zeros((4, 4))\n sweepego2global[3, 3] = 1\n sweepego2global[:3, :3] = sweepego2global_rot\n sweepego2global[:3, -1] = sweepego2global_tran\n\n # global sensor to cur ego\n w, x, y, z = key_info[cam]['ego_pose']['rotation']\n keyego2global_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n keyego2global_tran = torch.Tensor(\n key_info[cam]['ego_pose']['translation'])\n keyego2global = keyego2global_rot.new_zeros((4, 4))\n keyego2global[3, 3] = 1\n keyego2global[:3, :3] = keyego2global_rot\n keyego2global[:3, -1] = keyego2global_tran\n global2keyego = keyego2global.inverse()\n\n # cur ego to sensor\n w, x, y, z = key_info[cam]['calibrated_sensor']['rotation']\n keysensor2keyego_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n keysensor2keyego_tran = torch.Tensor(\n key_info[cam]['calibrated_sensor']['translation'])\n keysensor2keyego = keysensor2keyego_rot.new_zeros((4, 4))\n keysensor2keyego[3, 3] = 1\n keysensor2keyego[:3, :3] = keysensor2keyego_rot\n keysensor2keyego[:3, -1] = keysensor2keyego_tran\n keyego2keysensor = keysensor2keyego.inverse()\n keysensor2sweepsensor = (\n keyego2keysensor @ global2keyego @ sweepego2global\n @ sweepsensor2sweepego).inverse()\n sweepsensor2keyego = global2keyego @ sweepego2global @\\\n sweepsensor2sweepego\n sensor2ego_mats.append(sweepsensor2keyego)\n sensor2sensor_mats.append(keysensor2sweepsensor)\n intrin_mat = torch.zeros((4, 4))\n intrin_mat[3, 3] = 1\n intrin_mat[:3, :3] = torch.Tensor(\n cam_info[cam]['calibrated_sensor']['camera_intrinsic'])\n\n file_name = os.path.split(cam_info[cam]['filename'])[-1]\n if self.return_depth:\n point_depth = np.fromfile(os.path.join(\n self.data_root, self.depth_path, f'{file_name}.bin'),\n dtype=np.float32,\n count=-1)\n point_depth = point_depth.reshape(-1, 3)\n point_depth_augmented = self.depth_transform(\n point_depth, resize, self.ida_aug_conf['final_dim'],\n crop, flip, rotate_ida)\n gt_depths.append(point_depth_augmented)\n\n if self.return_radar_pv:\n radar_point = np.fromfile(os.path.join(\n self.data_root, self.radar_pv_path, f'{file_name}.bin'),\n dtype=np.float32,\n count=-1).reshape(-1, 7)\n radar_point_augmented = self.transform_radar_pv(\n radar_point, resize, self.ida_aug_conf['final_dim'],\n crop, flip, rotate_ida, radar_idx)\n radar_points.append(radar_point_augmented)\n\n img, ida_mat = img_transform(\n img,\n resize=resize,\n resize_dims=resize_dims,\n crop=crop,\n flip=flip,\n rotate=rotate_ida,\n )\n ida_mats.append(ida_mat)\n img = mmcv.imnormalize(np.array(img), self.img_mean,\n self.img_std, self.to_rgb)\n img = 
torch.from_numpy(img).permute(2, 0, 1)\n imgs.append(img)\n intrin_mats.append(intrin_mat)\n timestamps.append(cam_info[cam]['timestamp'])\n sweep_imgs.append(torch.stack(imgs))\n sweep_sensor2ego_mats.append(torch.stack(sensor2ego_mats))\n sweep_intrin_mats.append(torch.stack(intrin_mats))\n sweep_ida_mats.append(torch.stack(ida_mats))\n sweep_sensor2sensor_mats.append(torch.stack(sensor2sensor_mats))\n sweep_timestamps.append(torch.tensor(timestamps))\n if self.return_depth:\n sweep_gt_depths.append(torch.stack(gt_depths))\n if self.return_radar_pv:\n sweep_radar_points.append(torch.stack(radar_points))\n\n ret_list = [\n torch.stack(sweep_imgs).permute(1, 0, 2, 3, 4),\n torch.stack(sweep_sensor2ego_mats).permute(1, 0, 2, 3),\n torch.stack(sweep_intrin_mats).permute(1, 0, 2, 3),\n torch.stack(sweep_ida_mats).permute(1, 0, 2, 3),\n torch.stack(sweep_sensor2sensor_mats).permute(1, 0, 2, 3),\n torch.stack(sweep_timestamps).permute(1, 0),\n ]\n if self.return_depth:\n ret_list.append(torch.stack(sweep_gt_depths).permute(1, 0, 2, 3),)\n else:\n ret_list.append(None)\n if self.return_radar_pv:\n ret_list.append(torch.stack(sweep_radar_points).permute(1, 0, 2, 3),)\n else:\n ret_list.append(None)\n return ret_list\n\n def get_image_meta(self, cam_infos, cams):\n key_info = cam_infos[0]\n\n # Get mean pose of all cams.\n ego2global_rotation = np.mean(\n [key_info[cam]['ego_pose']['rotation'] for cam in cams], 0)\n ego2global_translation = np.mean(\n [key_info[cam]['ego_pose']['translation'] for cam in cams], 0)\n img_metas = dict(\n box_type_3d=LiDARInstance3DBoxes,\n ego2global_translation=ego2global_translation,\n ego2global_rotation=ego2global_rotation,\n )\n return img_metas\n\n def get_image_sensor2ego_mats(self, cam_infos, cams):\n sweep_sensor2ego_mats = list()\n for cam in cams:\n sensor2ego_mats = list()\n key_info = cam_infos[0]\n for sweep_idx, cam_info in enumerate(cam_infos):\n w, x, y, z = cam_info[cam]['calibrated_sensor']['rotation']\n # sweep sensor to sweep ego\n sweepsensor2sweepego_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n sweepsensor2sweepego_tran = torch.Tensor(\n cam_info[cam]['calibrated_sensor']['translation'])\n sweepsensor2sweepego = sweepsensor2sweepego_rot.new_zeros(\n (4, 4))\n sweepsensor2sweepego[3, 3] = 1\n sweepsensor2sweepego[:3, :3] = sweepsensor2sweepego_rot\n sweepsensor2sweepego[:3, -1] = sweepsensor2sweepego_tran\n # sweep ego to global\n w, x, y, z = cam_info[cam]['ego_pose']['rotation']\n sweepego2global_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n sweepego2global_tran = torch.Tensor(\n cam_info[cam]['ego_pose']['translation'])\n sweepego2global = sweepego2global_rot.new_zeros((4, 4))\n sweepego2global[3, 3] = 1\n sweepego2global[:3, :3] = sweepego2global_rot\n sweepego2global[:3, -1] = sweepego2global_tran\n\n # global sensor to cur ego\n w, x, y, z = key_info[cam]['ego_pose']['rotation']\n keyego2global_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n keyego2global_tran = torch.Tensor(\n key_info[cam]['ego_pose']['translation'])\n keyego2global = keyego2global_rot.new_zeros((4, 4))\n keyego2global[3, 3] = 1\n keyego2global[:3, :3] = keyego2global_rot\n keyego2global[:3, -1] = keyego2global_tran\n global2keyego = keyego2global.inverse()\n\n # cur ego to sensor\n w, x, y, z = key_info[cam]['calibrated_sensor']['rotation']\n keysensor2keyego_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n keysensor2keyego_tran = torch.Tensor(\n key_info[cam]['calibrated_sensor']['translation'])\n 
keysensor2keyego = keysensor2keyego_rot.new_zeros((4, 4))\n keysensor2keyego[3, 3] = 1\n keysensor2keyego[:3, :3] = keysensor2keyego_rot\n keysensor2keyego[:3, -1] = keysensor2keyego_tran\n sweepsensor2keyego = global2keyego @ sweepego2global @\\\n sweepsensor2sweepego\n sensor2ego_mats.append(sweepsensor2keyego)\n sweep_sensor2ego_mats.append(torch.stack(sensor2ego_mats))\n return torch.stack(sweep_sensor2ego_mats).permute(1, 0, 2, 3)\n\n def get_gt(self, info, cams, return_corners=False):\n \"\"\"Generate gt labels from info.\n\n Args:\n info(dict): Infos needed to generate gt labels.\n cams(list): Camera names.\n\n Returns:\n Tensor: GT bboxes.\n Tensor: GT labels.\n \"\"\"\n ego2global_rotation = np.mean(\n [info['cam_infos'][cam]['ego_pose']['rotation'] for cam in cams],\n 0)\n ego2global_translation = np.mean([\n info['cam_infos'][cam]['ego_pose']['translation'] for cam in cams\n ], 0)\n trans = -np.array(ego2global_translation)\n rot = Quaternion(ego2global_rotation).inverse\n gt_boxes = list()\n gt_labels = list()\n if return_corners: # for debugging and visualization\n gt_corners = list()\n else:\n gt_corners = None\n for ann_info in info['ann_infos']:\n # Use ego coordinate.\n if self.gt_for_radar_only:\n if ann_info['num_radar_pts'] == 0:\n continue\n if map_name_from_general_to_detection[ann_info['category_name']] not in self.classes:\n continue\n if ann_info['num_lidar_pts'] + ann_info['num_radar_pts'] == 0:\n continue\n\n box = Box(\n ann_info['translation'],\n ann_info['size'],\n Quaternion(ann_info['rotation']),\n velocity=ann_info['velocity'],\n )\n box.translate(trans)\n box.rotate(rot)\n box_xyz = np.array(box.center)\n box_dxdydz = np.array(box.wlh)[[1, 0, 2]]\n box_yaw = np.array([box.orientation.yaw_pitch_roll[0]])\n box_velo = np.array(box.velocity[:2])\n gt_box = np.concatenate([box_xyz, box_dxdydz, box_yaw, box_velo])\n gt_boxes.append(gt_box)\n gt_labels.append(\n self.classes.index(map_name_from_general_to_detection[\n ann_info['category_name']]))\n if return_corners: # for debugging and visualization\n gt_corners.append(box.corners())\n\n return torch.Tensor(gt_boxes), torch.tensor(gt_labels), gt_corners\n\n def choose_cams(self):\n \"\"\"Choose cameras randomly.\n\n Returns:\n list: Cameras to be used.\n \"\"\"\n if self.is_train and self.ida_aug_conf['Ncams'] < len(\n self.ida_aug_conf['cams']):\n cams = np.random.choice(self.ida_aug_conf['cams'],\n self.ida_aug_conf['Ncams'],\n replace=False)\n else:\n cams = self.ida_aug_conf['cams']\n return cams\n\n def __getitem__(self, idx):\n if self.use_cbgs:\n idx = self.sample_indices[idx]\n cam_infos = list()\n pts_infos = list()\n cams = self.choose_cams()\n for key_idx in self.key_idxes:\n cur_idx = key_idx + idx\n # Handle scenarios when current idx doesn't have previous key\n # frame or previous key frame is from another scene.\n while self.infos[cur_idx]['scene_token'] != self.infos[idx]['scene_token']:\n cur_idx += 1\n info = self.infos[cur_idx]\n cam_infos.append(info['cam_infos'])\n pts_infos.append([info['lidar_infos']] + info['lidar_sweeps'])\n for sweep_idx in self.sweeps_idx:\n if len(info['cam_sweeps']) == 0:\n cam_infos.append(info['cam_infos'])\n else:\n # Handle scenarios when current sweep doesn't have all cam keys.\n for i in range(min(len(info['cam_sweeps']) - 1, sweep_idx), -1,\n -1):\n if sum([cam in info['cam_sweeps'][i]\n for cam in cams]) == len(cams):\n cam_infos.append(info['cam_sweeps'][i])\n break\n\n if self.return_image or self.return_depth or self.return_radar_pv:\n image_data_list = 
self.get_image(cam_infos, cams)\n (\n sweep_imgs,\n sweep_sensor2ego_mats,\n sweep_intrins,\n sweep_ida_mats,\n sweep_sensor2sensor_mats,\n sweep_timestamps,\n ) = image_data_list[:6]\n else:\n (\n sweep_imgs,\n sweep_intrins,\n sweep_ida_mats,\n sweep_sensor2sensor_mats,\n sweep_timestamps,\n ) = None, None, None, None, None\n sweep_sensor2ego_mats = self.get_image_sensor2ego_mats(cam_infos, cams)\n\n img_metas = self.get_image_meta(cam_infos, cams)\n img_metas['token'] = self.infos[idx]['sample_token']\n gt_boxes_3d, gt_labels_3d, gt_corners = self.get_gt(self.infos[idx], cams, return_corners=False)\n\n rotate_bda, scale_bda, flip_dx, flip_dy = self.sample_bda_augmentation()\n gt_boxes_3d, bda_rot = bev_det_transform(gt_boxes_3d, rotate_bda, scale_bda, flip_dx, flip_dy)\n\n bda_mat = torch.zeros(4, 4, dtype=torch.float32)\n bda_mat[:3, :3] = bda_rot\n bda_mat[3, 3] = 1\n\n ret_list = [\n sweep_imgs,\n sweep_sensor2ego_mats,\n sweep_intrins,\n sweep_ida_mats,\n sweep_sensor2sensor_mats,\n bda_mat,\n sweep_timestamps,\n img_metas,\n gt_boxes_3d,\n gt_labels_3d,\n ]\n\n if self.return_depth:\n ret_list.append(image_data_list[6])\n else:\n ret_list.append(None)\n if self.return_radar_pv:\n ret_list.append(image_data_list[7])\n else:\n ret_list.append(None)\n\n return ret_list\n\n def __str__(self):\n return f\"\"\"NuscData: {len(self)} samples. Split: \\\n {\"train\" if self.is_train else \"val\"}.\n Augmentation Conf: {self.ida_aug_conf}\"\"\"\n\n def __len__(self):\n if self.use_cbgs:\n return len(self.sample_indices)\n else:\n return len(self.infos)" }, { "identifier": "collate_fn", "path": "datasets/nusc_det_dataset.py", "snippet": "def collate_fn(data,\n is_return_image=True,\n is_return_depth=False,\n is_return_radar_pv=False):\n assert (is_return_image or is_return_depth or is_return_radar_pv) is True\n imgs_batch = list()\n sensor2ego_mats_batch = list()\n intrin_mats_batch = list()\n ida_mats_batch = list()\n sensor2sensor_mats_batch = list()\n bda_mat_batch = list()\n gt_boxes_3d_batch = list()\n gt_labels_3d_batch = list()\n img_metas_batch = list()\n depth_labels_batch = list()\n radar_pv_batch = list()\n\n for iter_data in data:\n (\n sweep_imgs,\n sweep_sensor2ego_mats,\n sweep_intrins,\n sweep_ida_mats,\n sweep_sensor2sensor_mats,\n bda_mat,\n sweep_timestamps,\n img_metas,\n gt_boxes,\n gt_labels,\n ) = iter_data[:10]\n if is_return_depth:\n gt_depth = iter_data[10]\n depth_labels_batch.append(gt_depth)\n if is_return_radar_pv:\n radar_pv = iter_data[11]\n radar_pv_batch.append(radar_pv)\n\n imgs_batch.append(sweep_imgs)\n sensor2ego_mats_batch.append(sweep_sensor2ego_mats)\n intrin_mats_batch.append(sweep_intrins)\n ida_mats_batch.append(sweep_ida_mats)\n sensor2sensor_mats_batch.append(sweep_sensor2sensor_mats)\n bda_mat_batch.append(bda_mat)\n img_metas_batch.append(img_metas)\n gt_boxes_3d_batch.append(gt_boxes)\n gt_labels_3d_batch.append(gt_labels)\n\n if is_return_image:\n mats_dict = dict()\n mats_dict['sensor2ego_mats'] = torch.stack(sensor2ego_mats_batch)\n mats_dict['intrin_mats'] = torch.stack(intrin_mats_batch)\n mats_dict['ida_mats'] = torch.stack(ida_mats_batch)\n mats_dict['sensor2sensor_mats'] = torch.stack(sensor2sensor_mats_batch)\n mats_dict['bda_mat'] = torch.stack(bda_mat_batch)\n ret_list = [\n torch.stack(imgs_batch),\n mats_dict,\n img_metas_batch,\n gt_boxes_3d_batch,\n gt_labels_3d_batch,\n None, # reserve for segmentation\n ]\n else:\n ret_list = [\n None,\n None,\n img_metas_batch,\n gt_boxes_3d_batch,\n gt_labels_3d_batch,\n None,\n ]\n if 
is_return_depth:\n ret_list.append(torch.stack(depth_labels_batch))\n else:\n ret_list.append(None)\n if is_return_radar_pv:\n ret_list.append(torch.stack(radar_pv_batch))\n else:\n ret_list.append(None)\n\n return ret_list" }, { "identifier": "DetNuscEvaluator", "path": "evaluators/det_evaluators.py", "snippet": "class DetNuscEvaluator():\n ErrNameMapping = {\n 'trans_err': 'mATE',\n 'scale_err': 'mASE',\n 'orient_err': 'mAOE',\n 'vel_err': 'mAVE',\n 'attr_err': 'mAAE',\n }\n\n DefaultAttribute = {\n 'car': 'vehicle.parked',\n 'pedestrian': 'pedestrian.moving',\n 'trailer': 'vehicle.parked',\n 'truck': 'vehicle.parked',\n 'bus': 'vehicle.moving',\n 'motorcycle': 'cycle.without_rider',\n 'construction_vehicle': 'vehicle.parked',\n 'bicycle': 'cycle.without_rider',\n 'barrier': '',\n 'traffic_cone': '',\n }\n\n def __init__(\n self,\n class_names,\n eval_version='detection_cvpr_2019',\n data_root='./data/nuScenes',\n version='v1.0-trainval',\n modality=dict(use_lidar=False,\n use_camera=True,\n use_radar=True,\n use_map=False,\n use_external=False),\n output_dir=None,\n ) -> None:\n self.eval_version = eval_version\n self.data_root = data_root\n\n # Load config file and deserialize it.\n this_dir = osp.dirname(osp.abspath(__file__))\n with open(osp.join(this_dir, 'configs', '%s.json' % eval_version), 'r') as f:\n data = json.load(f)\n self.eval_detection_configs = DetectionConfig.deserialize(data)\n\n self.version = version\n self.class_names = class_names\n self.modality = modality\n self.output_dir = output_dir\n\n def _evaluate_single(self,\n result_path,\n logger=None,\n metric='bbox',\n result_name='pts_bbox'):\n \"\"\"Evaluation for a single model in nuScenes protocol.\n\n Args:\n result_path (str): Path of the result file.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n metric (str): Metric name used for evaluation. 
Default: 'bbox'.\n result_name (str): Result name in the metric prefix.\n Default: 'pts_bbox'.\n\n Returns:\n dict: Dictionary of evaluation details.\n \"\"\"\n from nuscenes import NuScenes\n from nuscenes.eval.detection.evaluate import NuScenesEval\n\n output_dir = osp.join(*osp.split(result_path)[:-1])\n nusc = NuScenes(version=self.version,\n dataroot=self.data_root,\n verbose=False)\n eval_set_map = {\n 'v1.0-mini': 'mini_val',\n 'v1.0-trainval': 'val',\n }\n nusc_eval = NuScenesEval(nusc,\n config=self.eval_detection_configs,\n result_path=result_path,\n eval_set=eval_set_map[self.version],\n output_dir=output_dir,\n verbose=False)\n nusc_eval.main(render_curves=False)\n # nusc_eval.main(render_curves=True, plot_examples=40)\n\n # record metrics\n metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))\n detail = dict()\n metric_prefix = f'{result_name}_NuScenes'\n for class_name in self.class_names:\n for k, v in metrics['label_aps'][class_name].items():\n val = float('{:.4f}'.format(v))\n detail['{}/{}_AP_dist_{}'.format(metric_prefix, class_name,\n k)] = val\n for k, v in metrics['label_tp_errors'][class_name].items():\n val = float('{:.4f}'.format(v))\n detail['{}/{}_{}'.format(metric_prefix, class_name, k)] = val\n for k, v in metrics['tp_errors'].items():\n val = float('{:.4f}'.format(v))\n detail['{}/{}'.format(metric_prefix,\n self.ErrNameMapping[k])] = val\n\n detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']\n detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']\n return detail\n\n def format_results(self,\n results,\n img_metas,\n result_names=['img_bbox'],\n jsonfile_prefix=None,\n **kwargs):\n \"\"\"Format the results to json (standard format for COCO evaluation).\n\n Args:\n results (list[tuple | numpy.ndarray]): Testing results of the\n dataset.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n\n Returns:\n tuple: (result_files, tmp_dir), result_files is a dict containing \\\n the json filepaths, tmp_dir is the temporal directory created \\\n for saving json files when jsonfile_prefix is not specified.\n \"\"\"\n assert isinstance(results, list), 'results must be a list'\n\n if jsonfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n jsonfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n\n # currently the output prediction results could be in two formats\n # 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)\n # 2. 
list of dict('pts_bbox' or 'img_bbox':\n # dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...))\n # this is a workaround to enable evaluation of both formats on nuScenes\n # refer to https://github.com/open-mmlab/mmdetection3d/issues/449\n # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict\n result_files = dict()\n # refactor this.\n for rasult_name in result_names:\n # not evaluate 2D predictions on nuScenes\n if '2d' in rasult_name:\n continue\n print(f'\\nFormating bboxes of {rasult_name}')\n tmp_file_ = osp.join(jsonfile_prefix, rasult_name)\n if self.output_dir:\n result_files.update({\n rasult_name:\n self._format_bbox(results, img_metas, self.output_dir)\n })\n else:\n result_files.update({\n rasult_name:\n self._format_bbox(results, img_metas, tmp_file_)\n })\n return result_files, tmp_dir\n\n def evaluate(\n self,\n results,\n img_metas,\n metric='bbox',\n logger=None,\n jsonfile_prefix=None,\n result_names=['img_bbox'],\n show=False,\n out_dir=None,\n pipeline=None,\n ):\n \"\"\"Evaluation in nuScenes protocol.\n\n Args:\n results (list[dict]): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n show (bool): Whether to visualize.\n Default: False.\n out_dir (str): Path to save the visualization results.\n Default: None.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n\n Returns:\n dict[str, float]: Results of each evaluation metric.\n \"\"\"\n result_files, tmp_dir = self.format_results(results, img_metas,\n result_names,\n jsonfile_prefix)\n if isinstance(result_files, dict):\n for name in result_names:\n print('Evaluating bboxes of {}'.format(name))\n print()\n self._evaluate_single(result_files[name])\n elif isinstance(result_files, str):\n self._evaluate_single(result_files)\n\n if tmp_dir is not None:\n tmp_dir.cleanup()\n\n def _format_bbox(self, results, img_metas, jsonfile_prefix=None):\n \"\"\"Convert the results to the standard format.\n\n Args:\n results (list[dict]): Testing results of the dataset.\n jsonfile_prefix (str): The prefix of the output jsonfile.\n You can specify the output directory/filename by\n modifying the jsonfile_prefix. 
Default: None.\n\n Returns:\n str: Path of the output json file.\n \"\"\"\n nusc_annos = {}\n mapped_class_names = self.class_names\n\n print('Start to convert detection format...')\n\n for sample_id, det in enumerate(mmcv.track_iter_progress(results)):\n boxes, scores, labels = det\n\n order = np.argsort(scores)[::-1]\n order = order[:500]\n\n boxes = boxes[order]\n scores = scores[order]\n labels = labels[order]\n\n sample_token = img_metas[sample_id]['token']\n trans = np.array(img_metas[sample_id]['ego2global_translation'])\n rot = Quaternion(img_metas[sample_id]['ego2global_rotation'])\n annos = list()\n for i, box in enumerate(boxes):\n name = mapped_class_names[labels[i]]\n center = box[:3]\n wlh = box[[4, 3, 5]]\n box_yaw = box[6]\n box_vel = box[7:].tolist()\n box_vel.append(0)\n quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw)\n nusc_box = Box(center, wlh, quat, velocity=box_vel)\n nusc_box.rotate(rot)\n nusc_box.translate(trans)\n if np.sqrt(nusc_box.velocity[0]**2 +\n nusc_box.velocity[1]**2) > 0.2:\n if name in [\n 'car',\n 'construction_vehicle',\n 'bus',\n 'truck',\n 'trailer',\n ]:\n attr = 'vehicle.moving'\n elif name in ['bicycle', 'motorcycle']:\n attr = 'cycle.with_rider'\n else:\n attr = self.DefaultAttribute[name]\n else:\n if name in ['pedestrian']:\n attr = 'pedestrian.standing'\n elif name in ['bus']:\n attr = 'vehicle.stopped'\n else:\n attr = self.DefaultAttribute[name]\n nusc_anno = dict(\n sample_token=sample_token,\n translation=nusc_box.center.tolist(),\n size=nusc_box.wlh.tolist(),\n rotation=nusc_box.orientation.elements.tolist(),\n velocity=nusc_box.velocity[:2],\n detection_name=name,\n detection_score=float(scores[i]),\n attribute_name=attr,\n )\n annos.append(nusc_anno)\n # other views results of the same frame should be concatenated\n if sample_token in nusc_annos:\n nusc_annos[sample_token].extend(annos)\n else:\n nusc_annos[sample_token] = annos\n nusc_submissions = {\n 'meta': self.modality,\n 'results': nusc_annos,\n }\n mmcv.mkdir_or_exist(jsonfile_prefix)\n res_path = osp.join(jsonfile_prefix, 'results_nusc.json')\n print('Results writes to', res_path)\n mmcv.dump(nusc_submissions, res_path)\n return res_path" }, { "identifier": "BaseBEVDepth", "path": "models/base_bev_depth.py", "snippet": "class BaseBEVDepth(nn.Module):\n \"\"\"Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`.\n\n Args:\n backbone_conf (dict): Config of backbone.\n head_conf (dict): Config of head.\n \"\"\"\n\n def __init__(self, backbone_conf, head_conf):\n super(BaseBEVDepth, self).__init__()\n self.backbone_img = BaseLSSFPN(**backbone_conf)\n self.head = BEVDepthHead(**head_conf)\n\n # for inference time measurement\n self.idx = 0\n self.times_dict = {\n 'img': [],\n 'img_backbone': [],\n 'img_dep': [],\n 'img_transform': [],\n 'img_pool': [],\n\n 'head': [],\n 'head_backbone': [],\n 'head_head': [],\n }\n\n def forward(self,\n sweep_imgs,\n mats_dict,\n is_train=False\n ):\n \"\"\"Forward function for BEVDepth\n\n Args:\n sweep_imgs (Tensor): Input images.\n mats_dict(dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego with shape of (B, num_sweeps,\n num_cameras, 4, 4).\n intrin_mats(Tensor): Intrinsic matrix with shape\n of (B, num_sweeps, num_cameras, 4, 4).\n ida_mats(Tensor): Transformation matrix for ida with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n 
bda_mat(Tensor): Rotation matrix for bda with shape\n of (B, 4, 4).\n\n Returns:\n tuple(list[dict]): Output results for tasks.\n \"\"\"\n if is_train:\n self.time = None\n\n x, depth, _ = self.backbone_img(sweep_imgs, mats_dict,\n is_return_depth=True)\n preds, _ = self.head(x)\n return preds, depth\n else:\n if self.idx < 100: # skip few iterations for warmup\n self.times = None\n elif self.idx == 100:\n self.times = self.times_dict\n\n x, self.times = self.backbone_img(sweep_imgs, mats_dict,\n times=self.times)\n preds, self.times = self.head(x, times=self.times)\n\n if self.idx == 1000:\n time_mean = {}\n for k, v in self.times.items():\n time_mean[k] = sum(v) / len(v)\n print('img: %.2f' % time_mean['img'])\n print(' img_backbone: %.2f' % time_mean['img_backbone'])\n print(' img_dep: %.2f' % time_mean['img_dep'])\n print(' img_transform: %.2f' % time_mean['img_transform'])\n print(' img_pool: %.2f' % time_mean['img_pool'])\n print('head: %.2f' % time_mean['head'])\n print(' head_backbone: %.2f' % time_mean['head_backbone'])\n print(' head_head: %.2f' % time_mean['head_head'])\n total = time_mean['img'] + time_mean['head']\n print('total: %.2f' % total)\n print(' ')\n print('FPS: %.2f' % (1000/total))\n\n self.idx += 1\n return preds\n\n def get_targets(self, gt_boxes, gt_labels):\n \"\"\"Generate training targets for a single sample.\n\n Args:\n gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n gt_labels_3d (torch.Tensor): Labels of boxes.\n\n Returns:\n tuple[list[torch.Tensor]]: Tuple of target including \\\n the following results in order.\n\n - list[torch.Tensor]: Heatmap scores.\n - list[torch.Tensor]: Ground truth boxes.\n - list[torch.Tensor]: Indexes indicating the position \\\n of the valid boxes.\n - list[torch.Tensor]: Masks indicating which boxes \\\n are valid.\n \"\"\"\n return self.head.get_targets(gt_boxes, gt_labels)\n\n def loss(self, targets, preds_dicts):\n \"\"\"Loss function for BEVDepth.\n\n Args:\n gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground\n truth gt boxes.\n gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n preds_dicts (dict): Output of forward function.\n\n Returns:\n dict[str:torch.Tensor]: Loss of heatmap and bbox of each task.\n \"\"\"\n return self.head.loss(targets, preds_dicts)\n\n def get_bboxes(self, preds_dicts, img_metas=None, img=None, rescale=False):\n \"\"\"Generate bboxes from bbox head predictions.\n\n Args:\n preds_dicts (tuple[list[dict]]): Prediction results.\n img_metas (list[dict]): Point cloud and image's meta info.\n\n Returns:\n list[dict]: Decoded bbox, scores and labels after nms.\n \"\"\"\n return self.head.get_bboxes(preds_dicts, img_metas, img, rescale)" }, { "identifier": "all_gather_object", "path": "utils/torch_dist.py", "snippet": "def all_gather_object(obj):\n world_size = get_world_size()\n if world_size < 2:\n return [obj]\n output = [None for _ in range(world_size)]\n dist.all_gather_object(output, obj)\n return output" }, { "identifier": "synchronize", "path": "utils/torch_dist.py", "snippet": "def synchronize():\n \"\"\"Helper function to synchronize (barrier)\n among all processes when using distributed training\"\"\"\n if not dist.is_available():\n return\n if not dist.is_initialized():\n return\n current_world_size = dist.get_world_size()\n if current_world_size == 1:\n return\n dist.barrier()" } ]
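The `all_gather_object` and `synchronize` helpers in the context list above are thin wrappers around `torch.distributed`. A minimal sketch (not code from the repository; `local_results`, `local_metas`, and `evaluator` are hypothetical placeholders) of how such helpers are typically combined to pool per-rank detection outputs before the nuScenes-style formatting and evaluation shown above:

import torch.distributed as dist

from utils.torch_dist import all_gather_object, synchronize


def gather_and_evaluate(local_results, local_metas, evaluator):
    # Barrier so every rank has finished producing its predictions.
    synchronize()
    # Each rank contributes its own list; flatten the resulting list-of-lists.
    all_results = sum(all_gather_object(local_results), [])
    all_metas = sum(all_gather_object(local_metas), [])
    # Only one process should write results_nusc.json and run NuScenesEval.
    if not dist.is_initialized() or dist.get_rank() == 0:
        evaluator.evaluate(all_results, all_metas)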
from functools import partial
from pytorch_lightning.core import LightningModule
from torch.cuda.amp.autocast_mode import autocast
from torch.optim.lr_scheduler import MultiStepLR
from mmcv.runner import build_optimizer
from datasets.nusc_det_dataset import NuscDatasetRadarDet, collate_fn
from evaluators.det_evaluators import DetNuscEvaluator
from models.base_bev_depth import BaseBEVDepth
from utils.torch_dist import all_gather_object, synchronize
import mmcv
import torch
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
import torchvision.models as models
14,762
depth=50, frozen_stages=0, out_indices=[0, 1, 2, 3], norm_eval=False, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), ), 'img_neck_conf': dict( type='SECONDFPN', in_channels=[256, 512, 1024, 2048], upsample_strides=[0.25, 0.5, 1, 2], out_channels=[128, 128, 128, 128], ), 'depth_net_conf': dict(in_channels=512, mid_channels=512), 'camera_aware': True } CLASSES = [ 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone', ] head_conf = { 'bev_backbone_conf': dict( type='ResNet', in_channels=80, depth=18, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=[0, 1, 2], norm_eval=False, base_channels=160), 'bev_neck_conf': dict( type='SECONDFPN', in_channels=[80, 160, 320, 640], upsample_strides=[1, 2, 4, 8], out_channels=[64, 64, 64, 64]), 'tasks': [ dict(num_class=1, class_names=['car']), dict(num_class=2, class_names=['truck', 'construction_vehicle']), dict(num_class=2, class_names=['bus', 'trailer']), dict(num_class=1, class_names=['barrier']), dict(num_class=2, class_names=['motorcycle', 'bicycle']), dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),], 'common_heads': dict( reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), 'bbox_coder': dict( type='CenterPointBBoxCoder', post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_num=500, score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], code_size=9), 'train_cfg': dict( point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], grid_size=[512, 512, 1], voxel_size=[0.2, 0.2, 8], out_size_factor=4, dense_reg=1, gaussian_overlap=0.1, max_objs=500, min_radius=2, code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.5]), 'test_cfg': dict( post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_per_img=500, max_pool_nms=False, min_radius=[4, 12, 10, 1, 0.85, 0.175], score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], nms_type='circle', pre_max_size=1000, post_max_size=83, nms_thr=0.2), 'in_channels': 256, # Equal to bev_neck output_channels. 'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'), 'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25), 'gaussian_overlap': 0.1, 'min_radius': 2, } class BEVDepthLightningModel(LightningModule): MODEL_NAMES = sorted(name for name in models.__dict__ if name.islower() and not name.startswith('__') and callable(models.__dict__[name])) def __init__(self, gpus: int = 1, data_root='data/nuScenes', eval_interval=1, batch_size_per_device=8, class_names=CLASSES, backbone_img_conf=backbone_img_conf, head_conf=head_conf, ida_aug_conf=ida_aug_conf, bda_aug_conf=bda_aug_conf, rda_aug_conf=rda_aug_conf, default_root_dir='./outputs/', **kwargs): super().__init__() self.save_hyperparameters() self.gpus = gpus self.optimizer_config = optimizer_config self.pretrain_config = pretrain_config self.eval_interval = eval_interval self.batch_size_per_device = batch_size_per_device self.data_root = data_root self.class_names = class_names self.backbone_img_conf = backbone_img_conf self.head_conf = head_conf self.ida_aug_conf = ida_aug_conf self.bda_aug_conf = bda_aug_conf self.rda_aug_conf = rda_aug_conf mmcv.mkdir_or_exist(default_root_dir) self.default_root_dir = default_root_dir
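As a quick sanity check of the grid-related numbers in the configuration above (assuming the usual CenterPoint convention that the detection head operates on the BEV grid downsampled by `out_size_factor`), the values are mutually consistent:

pc_range = [-51.2, -51.2, -5, 51.2, 51.2, 3]
voxel_size = [0.2, 0.2, 8]
out_size_factor = 4
x_bound = [-51.2, 51.2, 0.8]

grid_x = round((pc_range[3] - pc_range[0]) / voxel_size[0])          # 512, matches grid_size[0]
grid_y = round((pc_range[4] - pc_range[1]) / voxel_size[1])          # 512, matches grid_size[1]
heatmap_hw = (grid_x // out_size_factor, grid_y // out_size_factor)  # (128, 128) head output
bev_cells = round((x_bound[1] - x_bound[0]) / x_bound[2])            # 128 cells from the view transform
print(grid_x, grid_y, heatmap_hw, bev_cells)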
# Copyright (c) Megvii Inc. All rights reserved. pretrain_config = dict( img_model_path=None, img_load_key=[], img_freeze_key=None, pts_model_path=None, pts_load_key=[]) optimizer_config = dict( type='AdamW', lr=2e-4, weight_decay=1e-2) H = 900 W = 1600 final_dim = (256, 704) img_conf = dict(img_mean=[123.675, 116.28, 103.53], img_std=[58.395, 57.12, 57.375], to_rgb=True) ida_aug_conf = { 'resize_lim': (0.386, 0.55), 'final_dim': final_dim, 'rot_lim': (-5.4, 5.4), 'H': 900, 'W': 1600, 'rand_flip': True, 'bot_pct_lim': (0.0, 0.0), 'cams': ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'], 'Ncams': 6, } bda_aug_conf = { 'rot_ratio': 1.0, 'rot_lim': (-22.5, 22.5), 'scale_lim': (0.95, 1.05), 'flip_dx_ratio': 0.5, 'flip_dy_ratio': 0.5 } rda_aug_conf = { 'N_sweeps': 6, 'N_use': 5, 'drop_ratio': 0.1, } backbone_img_conf = { 'x_bound': [-51.2, 51.2, 0.8], 'y_bound': [-51.2, 51.2, 0.8], 'z_bound': [-5, 3, 8], 'd_bound': [2.0, 58.0, 0.8], 'final_dim': final_dim, 'output_channels': 80, 'downsample_factor': 16, 'img_backbone_conf': dict( type='ResNet', depth=50, frozen_stages=0, out_indices=[0, 1, 2, 3], norm_eval=False, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), ), 'img_neck_conf': dict( type='SECONDFPN', in_channels=[256, 512, 1024, 2048], upsample_strides=[0.25, 0.5, 1, 2], out_channels=[128, 128, 128, 128], ), 'depth_net_conf': dict(in_channels=512, mid_channels=512), 'camera_aware': True } CLASSES = [ 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone', ] head_conf = { 'bev_backbone_conf': dict( type='ResNet', in_channels=80, depth=18, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=[0, 1, 2], norm_eval=False, base_channels=160), 'bev_neck_conf': dict( type='SECONDFPN', in_channels=[80, 160, 320, 640], upsample_strides=[1, 2, 4, 8], out_channels=[64, 64, 64, 64]), 'tasks': [ dict(num_class=1, class_names=['car']), dict(num_class=2, class_names=['truck', 'construction_vehicle']), dict(num_class=2, class_names=['bus', 'trailer']), dict(num_class=1, class_names=['barrier']), dict(num_class=2, class_names=['motorcycle', 'bicycle']), dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),], 'common_heads': dict( reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), 'bbox_coder': dict( type='CenterPointBBoxCoder', post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_num=500, score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], code_size=9), 'train_cfg': dict( point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], grid_size=[512, 512, 1], voxel_size=[0.2, 0.2, 8], out_size_factor=4, dense_reg=1, gaussian_overlap=0.1, max_objs=500, min_radius=2, code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.5]), 'test_cfg': dict( post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_per_img=500, max_pool_nms=False, min_radius=[4, 12, 10, 1, 0.85, 0.175], score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], nms_type='circle', pre_max_size=1000, post_max_size=83, nms_thr=0.2), 'in_channels': 256, # Equal to bev_neck output_channels. 
'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'), 'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25), 'gaussian_overlap': 0.1, 'min_radius': 2, } class BEVDepthLightningModel(LightningModule): MODEL_NAMES = sorted(name for name in models.__dict__ if name.islower() and not name.startswith('__') and callable(models.__dict__[name])) def __init__(self, gpus: int = 1, data_root='data/nuScenes', eval_interval=1, batch_size_per_device=8, class_names=CLASSES, backbone_img_conf=backbone_img_conf, head_conf=head_conf, ida_aug_conf=ida_aug_conf, bda_aug_conf=bda_aug_conf, rda_aug_conf=rda_aug_conf, default_root_dir='./outputs/', **kwargs): super().__init__() self.save_hyperparameters() self.gpus = gpus self.optimizer_config = optimizer_config self.pretrain_config = pretrain_config self.eval_interval = eval_interval self.batch_size_per_device = batch_size_per_device self.data_root = data_root self.class_names = class_names self.backbone_img_conf = backbone_img_conf self.head_conf = head_conf self.ida_aug_conf = ida_aug_conf self.bda_aug_conf = bda_aug_conf self.rda_aug_conf = rda_aug_conf mmcv.mkdir_or_exist(default_root_dir) self.default_root_dir = default_root_dir
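The `optimizer_config` dict defined above uses mmcv's registry-config format. The `configure_optimizers` hook itself is outside the code shown, but a minimal sketch (milestones are illustrative, not taken from the repository) of how such a dict is normally consumed together with the `build_optimizer` and `MultiStepLR` imports above:

from mmcv.runner import build_optimizer
from torch.optim.lr_scheduler import MultiStepLR

optimizer_cfg = dict(type='AdamW', lr=2e-4, weight_decay=1e-2)


def configure_optimizers_sketch(model, milestones=(19, 23)):
    # build_optimizer instantiates torch.optim.AdamW from the config dict
    # and attaches the model's parameters to it.
    optimizer = build_optimizer(model, optimizer_cfg)
    scheduler = MultiStepLR(optimizer, milestones=list(milestones))
    return [optimizer], [scheduler]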
self.evaluator = DetNuscEvaluator(class_names=self.class_names,
2
2023-12-06 14:57:49+00:00
24k
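A minimal sketch of turning a record like the one above into a next-line completion prompt (only the 'identifier'/'path'/'snippet' keys are taken from the context lists shown here; the function and argument names are illustrative assumptions, not a specific loader's API): the retrieved cross-file snippets are prepended as commented context, followed by the import block and the in-file prefix the model must continue.

def build_prompt(context_snippets, import_block, code_prefix):
    """context_snippets: list of dicts with 'identifier', 'path' and 'snippet'
    keys, as in the context lists shown in these records."""
    parts = []
    for item in context_snippets:
        parts.append(f"# Retrieved from {item['path']} ({item['identifier']})")
        parts.append(item["snippet"])
    parts.append(import_block)   # cross-file imports of the target file
    parts.append(code_prefix)    # the model is asked to predict the next line
    return "\n".join(parts)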
jinxixiang/magic_animate_unofficial
animatediff/magic_animate/pipeline.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/magic_animate/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n\n # Additional\n use_motion_module=False,\n motion_module_resolutions=(1, 2, 4, 8),\n motion_module_mid_block=False,\n motion_module_decoder_only=False,\n motion_module_type=None,\n motion_module_kwargs={},\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n\n # Addition for image embeddings\n use_image_condition=False,\n # Additional for dwpose adapter\n use_dwpose_adapter=False,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # dwpose condition\n if use_dwpose_adapter:\n self.dwpose_adapter = ControlNetConditioningEmbedding(conditioning_embedding_channels=4) # pose guider net\n else:\n self.dwpose_adapter = None\n\n self.use_image_condition = False\n if use_image_condition:\n self.use_image_condition = True\n self.image_proj_model = Resampler(\n dim=cross_attention_dim,\n depth=4,\n dim_head=64,\n heads=12,\n num_queries=16,\n embedding_dim=1024,\n output_dim=cross_attention_dim,\n ff_mult=4,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in 
enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (\n not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n 
resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n # for pose_guider\n dwpose_conditions: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2 ** self.num_upsamplers\n\n # if self.use_image_condition:\n # # project global image to 16 tokens for cross-attention\n # encoder_hidden_states = self.image_proj(encoder_hidden_states)\n # encoder_hidden_states = encoder_hidden_states.reshape(-1, 16, 768)\n # encoder_hidden_states = self.image_norm(encoder_hidden_states)\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # add pose conditions\n if dwpose_conditions is not None:\n conditions = self.dwpose_adapter(dwpose_conditions)\n sample += conditions\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb,\n encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets):]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size,\n encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise 
RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n\n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n\n return model" }, { "identifier": "ControlNetModel", "path": "animatediff/magic_animate/controlnet.py", "snippet": "class ControlNetModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"DownBlock2D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n ):\n super().__init__()\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = 
zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlock2DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n\n @classmethod\n def from_unet(\n cls,\n unet: UNet2DConditionModel,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n load_weights_from_unet: bool = True,\n ):\n r\"\"\"\n Instantiate Controlnet class from UNet2DConditionModel.\n\n Parameters:\n unet (`UNet2DConditionModel`):\n UNet model which weights are copied to the ControlNet. Note that all configuration options are also\n copied where applicable.\n \"\"\"\n controlnet = cls(\n in_channels=unet.config.in_channels,\n flip_sin_to_cos=unet.config.flip_sin_to_cos,\n freq_shift=unet.config.freq_shift,\n down_block_types=unet.config.down_block_types,\n only_cross_attention=unet.config.only_cross_attention,\n block_out_channels=unet.config.block_out_channels,\n layers_per_block=unet.config.layers_per_block,\n downsample_padding=unet.config.downsample_padding,\n mid_block_scale_factor=unet.config.mid_block_scale_factor,\n act_fn=unet.config.act_fn,\n norm_num_groups=unet.config.norm_num_groups,\n norm_eps=unet.config.norm_eps,\n cross_attention_dim=unet.config.cross_attention_dim,\n attention_head_dim=unet.config.attention_head_dim,\n use_linear_projection=unet.config.use_linear_projection,\n class_embed_type=unet.config.class_embed_type,\n num_class_embeds=unet.config.num_class_embeds,\n upcast_attention=unet.config.upcast_attention,\n resnet_time_scale_shift=unet.config.resnet_time_scale_shift,\n projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,\n controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,\n conditioning_embedding_out_channels=conditioning_embedding_out_channels,\n )\n\n if load_weights_from_unet:\n controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())\n controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())\n controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())\n\n if controlnet.class_embedding:\n controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())\n\n controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())\n controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())\n\n return controlnet\n\n # @property\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors\n # def attn_processors(self) -> Dict[str, AttentionProcessor]:\n # r\"\"\"\n # Returns:\n # `dict` of attention processors: A dictionary containing all attention processors used in the model with\n # indexed by its 
weight name.\n # \"\"\"\n # # set recursively\n # processors = {}\n\n # def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n # if hasattr(module, \"set_processor\"):\n # processors[f\"{name}.processor\"] = module.processor\n\n # for sub_name, child in module.named_children():\n # fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n # return processors\n\n # for name, module in self.named_children():\n # fn_recursive_add_processors(name, module, processors)\n\n # return processors\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor\n # def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):\n # r\"\"\"\n # Parameters:\n # `processor (`dict` of `AttentionProcessor` or `AttentionProcessor`):\n # The instantiated processor class or a dictionary of processor classes that will be set as the processor\n # of **all** `Attention` layers.\n # In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors.:\n\n # \"\"\"\n # count = len(self.attn_processors.keys())\n\n # if isinstance(processor, dict) and len(processor) != count:\n # raise ValueError(\n # f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n # f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n # )\n\n # def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n # if hasattr(module, \"set_processor\"):\n # if not isinstance(processor, dict):\n # module.set_processor(processor)\n # else:\n # module.set_processor(processor.pop(f\"{name}.processor\"))\n\n # for sub_name, child in module.named_children():\n # fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n # for name, module in self.named_children():\n # fn_recursive_attn_processor(name, module, processor)\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor\n # def set_default_attn_processor(self):\n # \"\"\"\n # Disables custom attention processors and sets the default attention implementation.\n # \"\"\"\n # self.set_attn_processor(AttnProcessor())\n\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maximum amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_sliceable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_sliceable_dims(module)\n\n num_sliceable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_sliceable_layers * [1]\n\n slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n conditioning_scale: float = 1.0,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n\n sample += controlnet_cond\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n\n # 5. Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n # 6. scaling\n down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]\n mid_block_res_sample *= conditioning_scale\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )" }, { "identifier": "ReferenceAttentionControl", "path": "animatediff/magic_animate/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1,\n clip_length=8,\n is_image=False,\n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n clip_length,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks=fusion_blocks,\n batch_size=batch_size,\n is_image=is_image,\n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n clip_length,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n is_image=False,\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n # uc_mask = (\n # torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n # .to(device)\n # .bool()\n # )\n\n uc_mask = (\n torch.Tensor(\n [1] * batch_size * num_images_per_prompt * clip_length + [0] * batch_size * num_images_per_prompt * clip_length)\n .to(device)\n .bool()\n )\n\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n if not is_image:\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n\n hidden_states_uc = self.attn1(norm_hidden_states,\n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if not is_image:\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "animatediff/magic_animate/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "animatediff/magic_animate/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "animatediff/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math
import numpy as np
import torch
import torch.distributed as dist
import einops
from typing import Callable, List, Optional, Union
from dataclasses import dataclass
from PIL import Image
from tqdm import tqdm
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging, BaseOutput
from einops import rearrange
from animatediff.magic_animate.unet_controlnet import UNet3DConditionModel
from animatediff.magic_animate.controlnet import ControlNetModel
from animatediff.magic_animate.mutual_self_attention import ReferenceAttentionControl
from animatediff.magic_animate.context import (
    get_context_scheduler,
    get_total_steps
)
from animatediff.utils.util import get_tensor_interpolation_method
from accelerate import cpu_offload
19,887
): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 pred_dir = (1 - alpha_prod_t_next) ** 0.5 * model_output x_next = alpha_prod_t_next ** 0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx + 1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor: int, device): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0], latents.shape[1], ((latents.shape[2] - 1) * interpolation_factor) + 1, latents.shape[3], latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i / interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0, i1 in zip(range(org_video_length), range(org_video_length)[1:]): v0 = latents[:, :, 
i0, :, :] v1 = latents[:, :, i1, :, :] new_latents[:, :, new_index, :, :] = v0 new_index += 1 for f in rate:
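The heart of the cropped code is next_step(), which runs the DDIM update in reverse: rather than denoising from t to t-1, it pushes a latent toward the next, noisier timestep so that a real image can be inverted into a reusable noise map. Restated on its own below, as a sketch that leaves out the timestep bookkeeping and the final_alpha_cumprod fallback handled by the pipeline.

import torch

def ddim_inversion_step(model_output: torch.Tensor,
                        x: torch.Tensor,
                        alpha_prod_t: torch.Tensor,
                        alpha_prod_t_next: torch.Tensor):
    # Estimate the clean sample x0 implied by the current latent and the noise prediction.
    pred_x0 = (x - (1 - alpha_prod_t) ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # Deterministically re-noise that estimate toward the *next* (noisier) timestep.
    pred_dir = (1 - alpha_prod_t_next) ** 0.5 * model_output
    x_next = alpha_prod_t_next ** 0.5 * pred_x0 + pred_dir
    return x_next, pred_x0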
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, controlnet: ControlNetModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1: -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank != 0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx + 1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx + 1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = ( batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length // clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # prepare conditions for controlnet # condition = torch.from_numpy(condition.copy()).to(device=device, dtype=dtype) / 255.0 condition = condition.to(device=device, dtype=dtype) # condition = torch.stack([condition for _ in range(num_videos_per_prompt)], dim=0) condition = einops.repeat(condition, 'b f c h w -> (b r) f c h w', r=num_videos_per_prompt) condition = rearrange(condition, 'b f c h w -> (b f) c h w').clone() # condition = rearrange(condition, 'b f h w c -> (b f) c h w').clone() if do_classifier_free_guidance: condition = torch.cat([condition] * 2) return condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = 
self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 pred_dir = (1 - alpha_prod_t_next) ** 0.5 * model_output x_next = alpha_prod_t_next ** 0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx + 1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor: int, device): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0], latents.shape[1], ((latents.shape[2] - 1) * interpolation_factor) + 1, latents.shape[3], latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i / interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0, i1 in zip(range(org_video_length), range(org_video_length)[1:]): v0 = latents[:, :, i0, :, :] v1 = latents[:, :, i1, :, :] new_latents[:, :, new_index, :, :] = v0 new_index += 1 for f in rate:
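One convention worth pulling out of the full file: images2latents() and decode_latents() both rely on the Stable Diffusion latent scale of 0.18215 and on mapping pixels between [0, 255] and [-1, 1]. A compact round-trip is sketched below, assuming vae is a diffusers AutoencoderKL and images_uint8 is an (N, 3, H, W) tensor; it is illustrative only, not pipeline code.

import torch

@torch.no_grad()
def vae_roundtrip_sketch(vae, images_uint8: torch.Tensor) -> torch.Tensor:
    x = images_uint8.float() / 127.5 - 1.0               # [0, 255] -> [-1, 1]
    latents = vae.encode(x).latent_dist.mean * 0.18215   # scale into latent space
    recon = vae.decode(latents / 0.18215).sample         # undo the scale, decode
    return (recon / 2 + 0.5).clamp(0, 1)                 # back to [0, 1] for viewing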
v = get_tensor_interpolation_method()(v0.to(device=device), v1.to(device=device), f)
5
2023-12-12 00:16:39+00:00
24k
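The gold continuation for this record, get_tensor_interpolation_method()(v0, v1, f), blends two neighbouring latent frames inside interpolate_latents(). The concrete tensor_interpolation behind that getter is not part of the context, so a plain linear blend is assumed below purely for illustration; the repository may use something else (for example spherical interpolation).

import torch

def linear_tensor_interpolation(v0: torch.Tensor, v1: torch.Tensor, f: float) -> torch.Tensor:
    # f in (0, 1): how far to move from frame v0 toward frame v1.
    return (1.0 - f) * v0 + f * v1

# With interpolation_factor=2 the loop's rate list is [0.5], so exactly one
# blended frame is inserted between each pair of original latent frames.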
qitan/devops-backend-lite
qtasks/tasks.py
[ { "identifier": "app", "path": "celery_tasks/celery.py", "snippet": "" }, { "identifier": "KubernetesDeploy", "path": "dbapp/model/model_cmdb.py", "snippet": "class KubernetesDeploy(TimeAbstract):\n appinfo = models.ForeignKey(\n AppInfo, related_name='app_info', null=True, on_delete=models.CASCADE)\n kubernetes = models.ForeignKey(\n KubernetesCluster, related_name='app_k8s', null=True, on_delete=models.CASCADE)\n online = models.SmallIntegerField(default=0, choices=G_ONLINE_CHOICE, verbose_name='是否上线',\n help_text=f'默认为0,即未上线\\n可选项: {G_ONLINE_CHOICE}')\n version = models.CharField(\n max_length=250, blank=True, null=True, verbose_name='当前版本')\n\n def __str__(self):\n return '%s-%s' % (self.appinfo.app.appid, self.kubernetes.name)\n\n class Meta:\n db_table = 'cmdb_kubernetesdeploy'\n default_permissions = ()" }, { "identifier": "MicroApp", "path": "dbapp/model/model_cmdb.py", "snippet": "class MicroApp(TimeAbstract):\n appid = models.CharField(max_length=250, db_index=True, unique=True, verbose_name='应用ID',\n help_text='应用唯一标识,无需填写')\n name = models.CharField(max_length=128, verbose_name='应用')\n alias = models.CharField(max_length=128, blank=True, verbose_name='别名')\n project = models.ForeignKey(\n Project, on_delete=models.PROTECT, null=True, blank=True, verbose_name='项目')\n creator = models.ForeignKey(UserProfile, on_delete=models.PROTECT, null=True, blank=True, verbose_name='应用创建人',\n help_text='前端不需要传递')\n repo = models.JSONField(default=dict, verbose_name='仓库地址',\n help_text='{\"id\": id, \"name\": name, \"http_url_to_repo\": url}')\n target = models.JSONField(default=get_default_value, verbose_name='JAR包配置',\n help_text='默认:default, {\"default\": \"default\", \"custom\": \"xxx/a.war\"}')\n team_members = models.JSONField(\n default=get_default_team_members, verbose_name=\"团队成员组\")\n category = models.CharField(\n max_length=128, blank=True, null=True, verbose_name='应用分类')\n template = models.JSONField(default=dict, verbose_name='应用配置',\n help_text='从数据字典接口获取,对应项的key为TEMPLATE, 数据格式为对象.\\n对应项的extra属性.\\n参数说明:\\nstrategy: 策略配置\\n - replicas: 副本, integer\\n - revisionHistoryLimit: 保留副本, integer\\n - minReadySeconds: 更新等待时间, integer\\n - maxSurge/maxUnavailable: 比例缩放 \\n\\nresources: 资源配额\\n - limits.cpu: CPU限制\\n - limits.memory: 内存限制\\n - requests.cpu: CPU请求\\n - requests.memory: 内存请求 \\n\\nenv: 环境变量, 数组[{\"name\": \"env1\", \"value\": \"value1\"}]\\n\\ncommand: 启动命令, 字符串')\n language = models.CharField(\n max_length=32, default='java', verbose_name='开发语言')\n build_command = models.CharField(max_length=250, blank=True, null=True, verbose_name='构建命令',\n help_text='根据应用开发语言, 从getKey(\"LANGUAGE\")获取数据, 取出extra字段的build值')\n multiple_app = models.BooleanField(\n default=False, blank=True, verbose_name='多应用标志')\n multiple_ids = models.JSONField(default=list, verbose_name='多应用关联ID列表')\n dockerfile = models.JSONField(default=get_default_value, verbose_name='Dockerfile配置',\n help_text='默认:{default: null}, 可选: {\"default|默认\": null, \"project|使用项目Dockerfile\": \"project\", \"custom|自定义Dockerfile\": \"\"}')\n online = models.BooleanField(default=True, blank=True, verbose_name='上线下线',\n help_text='应用上线/下线状态标记, 下线状态的应用禁止发布.')\n desc = models.TextField(verbose_name='描述', null=True, blank=True)\n notify = models.JSONField(default=dict, verbose_name='消息通知')\n can_edit = models.JSONField(default=list, verbose_name='管理人员',\n help_text='有权限编辑该应用的人员ID\\n格式为数组, 如[1,2]')\n is_k8s = models.CharField(max_length=8, default='k8s', choices=G_DEPLOY_TYPE, verbose_name='部署方式',\n help_text=f'默认k8s, 可选: 
{dict(G_DEPLOY_TYPE)}')\n modules = models.JSONField(default=list, verbose_name='工程模块')\n\n def __str__(self):\n return '[%s]%s' % (self.name, self.alias)\n\n class ExtMeta:\n related = True\n dashboard = True\n icon = 'component'\n\n class Meta:\n db_table = 'cmdb_microapp'\n default_permissions = ()\n ordering = ['-created_time']\n verbose_name = '应用'\n verbose_name_plural = verbose_name + '管理'" }, { "identifier": "Project", "path": "dbapp/model/model_cmdb.py", "snippet": "class Project(TimeAbstract, CommonParent):\n \"\"\"项目\"\"\"\n projectid = models.CharField(max_length=128, db_index=True, unique=True, verbose_name='项目ID',\n help_text='前端无须传值')\n name = models.CharField(max_length=100, verbose_name='项目名称')\n alias = models.CharField(max_length=128, default='', verbose_name='项目别名')\n product = models.ForeignKey(\n Product, on_delete=models.PROTECT, null=True, blank=True, verbose_name=\"区域\")\n creator = models.ForeignKey(UserProfile, on_delete=models.PROTECT, null=True, blank=True, verbose_name='项目创建人',\n help_text='前端不需要传递')\n manager = models.SmallIntegerField(\n blank=True, null=True, verbose_name='项目负责人')\n developer = models.SmallIntegerField(\n blank=True, null=True, verbose_name='开发负责人')\n tester = models.SmallIntegerField(\n blank=True, null=True, verbose_name='测试负责人')\n desc = models.TextField(verbose_name='描述', null=True, blank=True)\n notify = models.JSONField(default=dict, verbose_name='消息通知')\n\n def __str__(self):\n return self.name\n\n class ExtMeta:\n related = True\n dashboard = True\n icon = 'tree-table'\n\n class Meta:\n db_table = 'cmdb_project'\n verbose_name = '项目'\n verbose_name_plural = verbose_name + '管理'\n default_permissions = ()" }, { "identifier": "KubernetesCluster", "path": "dbapp/models.py", "snippet": "" }, { "identifier": "GitLabAPI", "path": "common/utils/GitLabAPI.py", "snippet": "class GitLabAPI(object):\n def __init__(self, url, user=None, password=None, token=None, oauth=False):\n self.__url = url\n if token:\n self.__token = token\n if oauth:\n params = {'oauth_token': self.__token}\n else:\n params = {'private_token': self.__token}\n self.__gl = gitlab.Gitlab(self.__url, **params)\n else:\n self.__gl = gitlab.Gitlab(\n self.__url, http_username=user, http_password=password)\n self.__gl.auth()\n\n def get_gl(self):\n return self.__gl\n\n def list_projects(self, get_all=False, key=None, per_page=20, page=1):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n projects = self.__gl.projects.list(**params)\n return projects\n\n def get_project(self, project_id=None, project_name_with_namespace=None):\n if any([project_id, project_name_with_namespace]) is False:\n raise Exception('缺少参数,project_id或project_name_with_namespace必选其一.')\n condition = project_id or project_name_with_namespace\n try:\n project = self.__gl.projects.get(condition)\n return project\n except BaseException as e:\n logger.info(e)\n return None\n\n def create_project(self, name, namespace_id=None, initialize_with_readme=False):\n payload = {'name': name, 'path': name,\n 'initialize_with_readme': initialize_with_readme}\n if namespace_id:\n payload['namespace_id'] = namespace_id\n try:\n ret = self.__gl.projects.create(payload)\n return True, ret\n except BaseException as e:\n logger.exception(f'创建分支请求异常,原因:{e.__dict__}')\n return False, e\n\n def get_commit(self, commit_id, project_id=None, project_name_with_namespace=None):\n try:\n commit = self.get_project(\n project_id, 
project_name_with_namespace).get(commit_id)\n return commit\n except BaseException as e:\n logger.info(e)\n return None\n\n def list_groups(self, get_all=False, key=None, per_page=20, page=1):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n groups = self.__gl.groups.list(**params)\n return [{'id': i.id, 'name': i.name, 'description': i.description} for i in groups if not i.parent_id]\n\n def create_group(self, name, path=None, desc=None, parent=None):\n \"\"\"\n 创建组\n \"\"\"\n payload = {'name': name, 'path': path or name,\n 'description': desc or ''}\n if parent:\n payload['parent_id'] = parent\n try:\n group = self.__gl.groups.create(payload)\n return True, group\n except BaseException as e:\n logger.info(e)\n return False, e\n\n def create_branch(self, project, src_branch, target_branch):\n payload = {'branch': target_branch,\n 'ref': src_branch}\n if isinstance(project, (int,)):\n project = self.get_project(project)\n try:\n ret = project.branches.create(payload)\n return True, ret\n except BaseException as e:\n logger.exception(f'创建分支请求异常,原因:{e.__dict__}')\n return False, e\n\n def list_branches(self, project_id=None, project_name_with_namespace=None, get_all=False, key=None, per_page=20,\n page=1, protected='0', *args, **kwargs):\n params = {'per_page': per_page, 'page': page}\n if not protected:\n protected = '0'\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n params.update(kwargs)\n branches = self.get_project(project_id=project_id,\n project_name_with_namespace=project_name_with_namespace).branches.list(**params)\n branches = [{'uid': f\"{G_COMMIT[0][0]}:{i.name}\", 'name': i.name, 'commit': i.commit, 'label': G_COMMIT[0][0], 'protected': i.protected}\n for i in branches]\n if protected != '0':\n # 过滤受保护分支\n _map = {'1': True, '2': False}\n branches = [i for i in branches if i['protected']\n == _map[protected]]\n return branches\n\n def list_protected_branches(self, project_id=None, project_name_with_namespace=None, get_all=False, key=None, per_page=20,\n page=1, *args, **kwargs):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n params.update(kwargs)\n branches = self.get_project(project_id=project_id,\n project_name_with_namespace=project_name_with_namespace).protectedbranches.list(**params)\n branches = [{'uid': f\"{G_COMMIT[0][0]}:{i.name}\", 'name': i.name, 'commit': i.commit, 'label': G_COMMIT[0][0], 'protected': i.protected}\n for i in branches]\n return branches\n\n def list_tags(self, project_id=None, project_name_with_namespace=None, get_all=False, key=None, per_page=20,\n page=1):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n tags = self.get_project(\n project_id, project_name_with_namespace).tags.list(**params)\n tags = [{'uid': f\"{G_COMMIT[1][0]}:{i.name}\", 'name': i.name, 'message': i.message, 'commit': i.commit,\n 'label': G_COMMIT[1][0]} for i in tags]\n return tags\n\n def list_commits(self, project_id=None, project_name_with_namespace=None, get_all=False, key=None, per_page=20,\n page=1, ref_name=None, since=None):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n if ref_name:\n params['ref_name'] 
= ref_name\n if since:\n params['since'] = since\n commits = self.get_project(\n project_id, project_name_with_namespace).commits.list(**params)\n commits = [\n {'title': i.title, 'short_id': i.short_id, 'author_name': i.author_name, 'committer_name': i.committer_name,\n 'committed_date': i.committed_date, 'message': i.message, 'web_url': i.web_url} for i in commits]\n return commits\n\n def repo_checkout(self, repo):\n import subprocess\n git_url = repo.split('//')\n subprocess.call(\n ['git', 'clone', f\"{git_url[0]}//oauth2:{self.__token}@{git_url[1]}\"])\n\n def get_user_id(self, username):\n user_list = self.__gl.users.list(username=username)\n if user_list:\n return user_list[0].id\n else:\n return None\n\n def get_project_from_name(self, project_name):\n projects = self.__gl.projects.list(search=project_name)\n for p in projects:\n if p.name == project_name:\n return p\n return None\n\n def add_project_member(self, project, user_id, access_level):\n try:\n project.members.create(\n {'user_id': user_id, 'access_level': access_level})\n return True, '成功'\n except Exception as error:\n return False, error\n\n def del_project_member(self, project, user_id):\n try:\n project.members.delete(user_id)\n return True, '成功'\n except Exception as error:\n return False, error" }, { "identifier": "BuildJob", "path": "dbapp/model/model_deploy.py", "snippet": "class BuildJob(TimeAbstract):\n \"\"\"\n 持续构建模型\n \"\"\"\n order_id = models.IntegerField(default=0, verbose_name='发布工单ID')\n appid = models.CharField(max_length=250, default='0',\n verbose_name='应用ID', help_text='应用唯一标识,无需填写')\n appinfo_id = models.IntegerField(\n default=0, db_index=True, verbose_name='应用模块ID')\n deployer = models.ForeignKey(UserProfile, verbose_name='发布人', blank=True, related_name='deployer', null=True,\n default=None, on_delete=models.SET_NULL)\n # {0: 未构建, 1: 构建成功, 2: 构建失败, 3: 构建中, 4: 作废}\n status = models.SmallIntegerField(default=3, choices=G_CI_STATUS, verbose_name=\"状态\",\n help_text=f\"状态值: {dict(G_CI_STATUS)}\")\n queue_number = models.IntegerField(default=0, verbose_name='队列ID')\n build_number = models.IntegerField(default=0, verbose_name='构建ID')\n commits = models.JSONField(default=dict, verbose_name='提交信息')\n commit_tag = models.JSONField(default=dict, verbose_name='提交类型',\n help_text='label可选: heads|tags\\nname: 具体的分支或者标签\\n{\"label\": \"heads\", \"name\": \"master\"}')\n # {0: 构建, 1: 构建发布}\n is_deploy = models.SmallIntegerField(default=0, verbose_name='构建发布',\n help_text='是否构建完后进行发布, {0: 不发布, 1: 发布}')\n jenkins_flow = models.TextField(\n verbose_name='jenkins pipeline', blank=True, null=True, default=\"\")\n image = models.CharField(max_length=250, blank=True,\n null=True, verbose_name='容器镜像')\n sync_status = models.SmallIntegerField(default=0, choices=G_IMAGE_SYNC_STAT, verbose_name='镜像同步状态',\n help_text=f\"{dict(G_IMAGE_SYNC_STAT)}, 默认0\")\n modules = models.CharField(\n max_length=250, blank=True, null=True, verbose_name='工程模块')\n batch_uuid = models.CharField(\n max_length=40, null=True, blank=True, verbose_name='批量部署标识')\n\n @property\n def job_name(self):\n try:\n appinfo_obj = AppInfo.objects.get(id=self.appinfo_id)\n job_name = f'{appinfo_obj.environment.name}-{appinfo_obj.app.category.split(\".\")[-1]}-{appinfo_obj.app.project.name}-{appinfo_obj.app.name.split(\".\")[-1]}'.lower(\n )\n except AppInfo.DoesNotExist:\n job_name = ''\n return job_name\n\n def __str__(self):\n return '%s-%s-%s' % (self.order_id, self.appinfo_id, self.image)\n\n class Meta:\n db_table = 'deploy_buildjob'\n 
default_permissions = ()\n ordering = ['-id']" }, { "identifier": "DeployJob", "path": "dbapp/model/model_deploy.py", "snippet": "class DeployJob(TimeAbstract):\n \"\"\"\n 持续部署模型\n \"\"\"\n uniq_id = models.CharField(\n max_length=250, unique=True, verbose_name='发布ID')\n order_id = models.CharField(max_length=40, null=True, blank=True, verbose_name=u'工单号',\n help_text='前端不需要传值')\n appid = models.CharField(max_length=250, default='0',\n verbose_name='应用ID', help_text='应用唯一标识,无需填写')\n appinfo_id = models.IntegerField(\n default=0, db_index=True, verbose_name='应用模块ID')\n deployer = models.ForeignKey(UserProfile, verbose_name='发布人', blank=True, related_name='cd_deployer', null=True,\n default=None, on_delete=models.SET_NULL)\n status = models.SmallIntegerField(default=0, choices=G_CD_STATUS, verbose_name=\"状态\",\n help_text=f'部署状态: {dict(G_CD_STATUS)}, 默认0')\n image = models.CharField(max_length=250, blank=True,\n null=True, verbose_name='容器镜像')\n kubernetes = models.JSONField(default=list, verbose_name='部署集群',\n help_text='待发布集群\\n格式为array, 存储集群id, eg: [1,2]')\n deploy_type = models.SmallIntegerField(default=0, choices=G_CD_TYPE, verbose_name='部署类型',\n help_text=f\"{dict(G_CD_TYPE)}, 默认0\")\n rollback_reason = models.SmallIntegerField(null=True, blank=True,\n verbose_name='回滚原因') # 具体类型查看 datadict 的 ROLLBACK_TYPE\n rollback_comment = models.TextField(\n null=True, blank=True, default='', verbose_name='回滚备注')\n modules = models.CharField(\n max_length=250, blank=True, null=True, verbose_name='工程模块')\n batch_uuid = models.CharField(\n max_length=40, null=True, blank=True, verbose_name='批量部署标识')\n\n @property\n def job_name(self):\n try:\n appinfo_obj = AppInfo.objects.get(id=self.appinfo_id)\n job_name = f'{appinfo_obj.environment}-{appinfo_obj.app.category.split(\".\")[-1]}-{appinfo_obj.app.project.name}-{appinfo_obj.app.name.split(\".\")[-1]}'.lower(\n )\n except AppInfo.DoesNotExist:\n job_name = ''\n return job_name\n\n def __str__(self) -> str:\n return self.uniq_id\n\n class Meta:\n db_table = 'deploy_deployjob'\n default_permissions = ()\n ordering = ['-id']" }, { "identifier": "PublishApp", "path": "dbapp/model/model_deploy.py", "snippet": "class PublishApp(TimeAbstract):\n \"\"\"\n 发布工单待发布应用\n \"\"\"\n order_id = models.CharField(\n max_length=40, verbose_name=u'工单号', help_text='前端不需要传值')\n appid = models.CharField(max_length=250, default='0',\n verbose_name='应用ID', help_text='应用唯一标识,无需填写')\n appinfo_id = models.IntegerField(\n default=0, verbose_name='应用模块ID, AppInfo id')\n name = models.CharField(max_length=128, verbose_name='应用名称')\n alias = models.CharField(max_length=128, verbose_name='应用别名')\n project = models.CharField(\n max_length=128, default='', verbose_name='项目', help_text='项目唯一ID, projectid')\n product = models.CharField(max_length=128, default='',\n verbose_name='产品', help_text='产品名称, name')\n category = models.CharField(\n max_length=128, blank=True, null=True, verbose_name='应用分类')\n environment = models.IntegerField(\n null=True, blank=True, verbose_name='应用环境', help_text=\"应用环境ID\")\n branch = models.CharField(max_length=64, null=True,\n blank=True, verbose_name='构建分支')\n image = models.CharField(max_length=250, blank=True,\n null=True, verbose_name='容器镜像')\n commits = models.JSONField(default=dict, verbose_name='提交信息')\n deploy_type = models.CharField(\n max_length=50, null=True, blank=True, verbose_name='部署类型')\n deploy_type_tag = models.SmallIntegerField(default=0, choices=G_CD_TYPE, verbose_name='部署类型标识',\n help_text=f\"{dict(G_CD_TYPE)}, 默认0\")\n status = 
models.SmallIntegerField(default=0, choices=G_ORDER_STATUS, verbose_name='发布状态',\n help_text=f'发布状态:\\n{G_ORDER_STATUS}')\n delete_flag = models.BooleanField(\n default=False, blank=False, verbose_name='逻辑删除')\n modules = models.CharField(\n max_length=250, blank=True, null=True, verbose_name='工程模块')\n\n def __str__(self):\n return '%s-%s-%s' % (self.order_id, self.appinfo_id, self.name)\n\n class Meta:\n db_table = 'deploy_publishapp'\n verbose_name = '待发布应用'\n verbose_name_plural = verbose_name + '管理'\n default_permissions = ()" }, { "identifier": "PublishOrder", "path": "dbapp/model/model_deploy.py", "snippet": "class PublishOrder(TimeAbstract):\n \"\"\"\n 发布工单,关联工单审批\n \"\"\"\n order_id = models.CharField(\n max_length=40, unique=True, verbose_name=u'工单号', help_text='前端不需要传值')\n dingtalk_tid = models.CharField(max_length=250, default=None, null=True, blank=True, verbose_name='钉钉工单ID',\n help_text='填写钉钉流程单号, 可为空')\n title = models.CharField(default='', max_length=250, verbose_name=u'标题')\n category = models.SmallIntegerField(default=0, choices=G_TICKET_TYPE, verbose_name='发版类型',\n help_text=f'可选: {G_TICKET_TYPE}')\n creator = models.ForeignKey(UserProfile, null=True, related_name='publish_creator', on_delete=models.SET_NULL,\n verbose_name=u'工单创建人')\n node_name = models.CharField(\n max_length=50, blank=True, null=True, verbose_name='节点')\n content = models.TextField(default='', verbose_name=u'变更内容')\n formdata = models.JSONField(default=dict, verbose_name='上线表单')\n effect = models.TextField(blank=True, null=True, verbose_name=u'影响')\n environment = models.IntegerField(\n null=True, blank=True, verbose_name='应用环境', help_text=\"应用环境ID\")\n apps = models.ManyToManyField(\n PublishApp, related_name='publish_apps', verbose_name='待发布应用')\n app = models.JSONField(default=list, verbose_name='应用服务',\n help_text='工单未审核通过, 展示关联的待发布应用.\\n格式为数组, 存放应用ID, 如[1, 2]')\n # {0: 未构建, 1: 构建成功, 2: 构建失败, 3: 构建中, 4: 作废/中止}\n status = models.SmallIntegerField(default=0, choices=G_ORDER_STATUS, verbose_name='发布单状态',\n help_text=f'工单状态:\\n{G_ORDER_STATUS}')\n result = models.TextField(blank=True, null=True,\n verbose_name=u'处理结果', help_text='前端无需传值')\n expect_time = models.DateTimeField(\n verbose_name='期望发布时间', default=None, null=True)\n executor = models.ForeignKey(UserProfile, null=True, related_name='publish_executor', on_delete=models.SET_NULL,\n help_text='前端不需要传值')\n deploy_time = models.DateTimeField(\n verbose_name='发布时间', default=None, null=True)\n method = models.CharField(max_length=6, default='manual',\n verbose_name='发版方式', help_text='{manual: 手动, auto: 自动, plan: 定时}')\n team_members = models.JSONField(default=list, verbose_name='团队人员')\n extra_deploy_members = models.JSONField(\n default=list, verbose_name='额外指定发布人员')\n\n def __str__(self):\n return str(self.title)\n\n class Meta:\n db_table = 'deploy_publishorder'\n default_permissions = ()\n verbose_name = '发布工单'\n verbose_name_plural = verbose_name + '管理'\n ordering = ['-created_time']" }, { "identifier": "BuildJobResult", "path": "dbapp/model/model_deploy.py", "snippet": "class BuildJobResult(TimeAbstract):\n \"\"\"\n CI结果\n \"\"\"\n job_id = models.IntegerField(default=0, db_index=True, verbose_name='任务ID')\n result = models.JSONField(default=dict, verbose_name='构建结果')\n console_output = models.TextField(default='', verbose_name='控制台输出结果')\n\n class Meta:\n db_table = 'deploy_buildjobresult'\n default_permissions = ()\n ordering = ['-id']" }, { "identifier": "UserProfile", "path": "dbapp/model/model_ucenter.py", "snippet": "class 
UserProfile(TimeAbstract, AbstractUser):\n \"\"\"\n 用户信息\n \"\"\"\n mobile = models.CharField(max_length=11, null=True,\n blank=True, verbose_name=\"手机号码\")\n avatar = models.ImageField(upload_to=\"static/%Y/%m\", default=\"image/default.png\",\n max_length=250, null=True, blank=True)\n department = models.ManyToManyField(\n Organization, related_name='org_user', verbose_name='部门')\n position = models.CharField(\n max_length=50, null=True, blank=True, verbose_name=\"职能\")\n title = models.CharField(max_length=50, null=True,\n blank=True, verbose_name=\"职位\")\n leader_user_id = models.CharField(\n max_length=64, null=True, blank=True, verbose_name=\"直属领导ID\")\n roles = models.ManyToManyField(\n \"Role\", verbose_name=\"角色\", related_name='user_role', blank=True)\n dn = models.CharField(max_length=120, null=True,\n blank=True, unique=True, verbose_name=\"ldap dn\")\n is_ldap = models.BooleanField(default=False, verbose_name=\"是否ldap用户\")\n ding_userid = models.CharField(\n max_length=150, null=True, blank=True, verbose_name=\"钉钉用户ID\")\n feishu_userid = models.CharField(\n max_length=120, null=True, blank=True, verbose_name=\"飞书UserID\")\n feishu_unionid = models.CharField(\n max_length=120, null=True, blank=True, verbose_name='飞书UnionID')\n feishu_openid = models.CharField(\n max_length=120, null=True, blank=True, verbose_name='飞书OpenID')\n\n @property\n def name(self):\n if self.first_name:\n return self.first_name\n if self.last_name:\n return self.last_name\n return self.username\n\n def __str__(self):\n return self.name\n\n class ExtMeta:\n related = True\n dashboard = False\n icon = 'peoples'\n\n class Meta:\n db_table = 'ucenter_userprofile'\n default_permissions = ()\n verbose_name = \"用户信息\"\n verbose_name_plural = verbose_name\n ordering = ['id']" }, { "identifier": "SystemConfig", "path": "dbapp/model/model_ucenter.py", "snippet": "class SystemConfig(TimeAbstract):\n name = models.CharField(max_length=64, default='',\n unique=True, verbose_name='名称')\n config = EncryptedJsonField(default=dict, verbose_name='配置')\n status = models.BooleanField(default=False, verbose_name='启用')\n type = models.CharField(max_length=64, default='', verbose_name='类型')\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = 'ucenter_systemconfig'\n default_permissions = ()\n verbose_name = '系统设置'\n verbose_name_plural = verbose_name + '管理'" }, { "identifier": "Organization", "path": "dbapp/model/model_ucenter.py", "snippet": "class Organization(TimeAbstract, CommonParent):\n \"\"\"\n 组织架构\n \"\"\"\n organization_type_choices = (\n (\"company\", \"公司\"),\n (\"department\", \"部门\")\n )\n dept_id = models.CharField(\n max_length=32, unique=True, null=True, blank=True, verbose_name='部门ID')\n name = models.CharField(max_length=60, verbose_name=\"名称\")\n leader_user_id = models.CharField(\n max_length=64, null=True, blank=True, verbose_name=\"部门领导ID\")\n type = models.CharField(\n max_length=20, choices=organization_type_choices, default=\"company\", verbose_name=\"类型\")\n dn = models.CharField(max_length=120, null=True,\n blank=True, unique=True, verbose_name=\"ldap dn\")\n\n @property\n def full(self):\n l = []\n self.get_parents(l)\n return l\n\n def get_parents(self, parent_result: list):\n if not parent_result:\n parent_result.append(self)\n parent_obj = self.parent\n if parent_obj:\n parent_result.append(parent_obj)\n parent_obj.get_parents(parent_result)\n\n def __str__(self):\n return self.name\n\n class ExtMeta:\n related = True\n dashboard = False\n\n class Meta:\n db_table = 
'ucenter_organization'\n default_permissions = ()\n verbose_name = \"组织架构\"\n verbose_name_plural = verbose_name" }, { "identifier": "DataDict", "path": "dbapp/model/model_ucenter.py", "snippet": "class DataDict(CommonParent):\n key = models.CharField(max_length=80, unique=True, verbose_name='键')\n value = models.CharField(max_length=80, verbose_name='值')\n extra = models.TextField(null=True, blank=True,\n default='', verbose_name='额外参数')\n desc = models.CharField(max_length=255, blank=True,\n null=True, verbose_name='备注')\n\n def __str__(self):\n return self.value\n\n class Meta:\n db_table = 'ucenter_datadict'\n default_permissions = ()\n verbose_name = '字典'\n verbose_name_plural = verbose_name + '管理'" }, { "identifier": "Workflow", "path": "dbapp/model/model_workflow.py", "snippet": "class Workflow(TimeAbstract):\n \"\"\"\n 工单\n \"\"\"\n\n class STATUS:\n close = '已关闭'\n revoke = '已撤回'\n reject = '被驳回'\n wait = '待处理'\n complete = '已完成'\n failed = '执行失败'\n\n choices = (\n (close, close),\n (revoke, revoke),\n (reject, reject),\n (wait, wait),\n (complete, complete),\n (failed, failed)\n )\n\n wid = models.CharField(max_length=40, null=True, blank=True, unique=True, verbose_name='工单号',\n help_text='前端不需要传值')\n topic = models.CharField(max_length=200, verbose_name='工单标题')\n node = models.CharField(max_length=50, verbose_name='当前节点名')\n status = models.CharField(\n max_length=30, choices=STATUS.choices, verbose_name='工单状态')\n creator = models.ForeignKey(\n UserProfile, null=True, on_delete=models.SET_NULL, verbose_name='发起人')\n template = models.ForeignKey(\n WorkflowTemplateRevisionHistory, verbose_name='模板副本', on_delete=models.PROTECT)\n comment = models.CharField(\n max_length=200, null=True, blank=True, verbose_name='备注')\n extra = models.JSONField(default=dict, verbose_name='扩展数据')\n workflow_flag = models.CharField(\n max_length=8, default='normal', verbose_name='工单标记', help_text='normal: 普通, app: 发版应用, sql: SQL工单')\n\n @property\n def cur_node_conf(self):\n for node_conf in self.template.nodes:\n if node_conf['name'] == self.node:\n return node_conf\n\n def generate_wid(self, save=False):\n st = shortuuid.ShortUUID()\n st.set_alphabet(\"0123456789\")\n self.wid = f\"{datetime.now().strftime('%Y%m%d%H%M%S')}{st.random(length=3)}\"\n if save is True:\n self.save()\n\n class Meta:\n db_table = 'workflow_workflow'\n ordering = ['-id']\n\n def __str__(self):\n return f'{self.template.id}@{self.template.name}-{self.topic}#{self.wid}-{self.status}'" }, { "identifier": "WorkflowNodeHistory", "path": "dbapp/model/model_workflow.py", "snippet": "class WorkflowNodeHistory(models.Model):\n \"\"\"\n 已处理的节点历史记录\n \"\"\"\n\n class HandleType(models.TextChoices):\n \"\"\"\n 触发类型\n \"\"\"\n PASSED = 'passed', '通过'\n REJECT = 'reject', '驳回'\n REVOKE = 'revoke', '撤回'\n CLOSE = 'close', '关闭'\n ERROR = 'error', '回调错误'\n\n workflow = models.ForeignKey(\n Workflow, on_delete=models.PROTECT, verbose_name='所属工单')\n node = models.CharField(max_length=50, verbose_name='节点')\n handle_type = models.CharField(\n max_length=50, null=True, choices=HandleType.choices, verbose_name='操作类型')\n operator = models.ForeignKey(\n UserProfile, null=True, on_delete=models.SET_NULL, verbose_name='操作人')\n form = models.JSONField(blank=True, null=True, verbose_name='表单数据')\n created_time = models.DateTimeField(\n auto_now_add=True, null=True, blank=True, verbose_name='创建时间')\n\n @property\n def node_conf(self):\n for node in self.workflow.template.nodes:\n if node['name'] == self.node:\n return node\n\n def __str__(self):\n 
return f'{self.workflow.topic}-{self.node}'\n\n class Meta:\n db_table = 'workflow_workflownodehistory'\n ordering = ['-id']" }, { "identifier": "WorkflowNodeHistoryCallback", "path": "dbapp/model/model_workflow.py", "snippet": "class WorkflowNodeHistoryCallback(CreateTimeAbstract):\n \"\"\"\n 工单回调信息表\n 记录所有工单节点在执行后触发的回调, 以及回调的相关信息\n \"\"\"\n\n class TriggerType(models.TextChoices):\n \"\"\"\n 触发类型\n \"\"\"\n AUTO = 'auto', '自动'\n MANUAL = 'manual', '手动'\n\n class CallbackType(models.TextChoices):\n \"\"\"\n 回调类型\n \"\"\"\n ALL = 'all', '提交后调用'\n PASSED = 'passed', '通过后调用'\n REJECT = 'reject', '驳回后调用'\n\n class Status(models.TextChoices):\n \"\"\"\n 回调类型\n \"\"\"\n PENDING = 'pending', '待响应'\n ERROR = 'error', '执行出错'\n SUCCESS = 'success', '执行成功'\n RETRY = 'retry', '已重试'\n\n node_history = models.ForeignKey(WorkflowNodeHistory, null=True, blank=True, on_delete=models.PROTECT,\n verbose_name='节点历史')\n trigger = models.ForeignKey(\n UserProfile, null=True, on_delete=models.SET_NULL, verbose_name='触发人')\n callback_url = models.CharField(max_length=250, verbose_name='回调URL')\n callback_type = models.CharField(\n max_length=250, choices=CallbackType.choices, verbose_name='回调类型')\n trigger_type = models.CharField(\n max_length=50, choices=TriggerType.choices, verbose_name='触发类型')\n status = models.CharField(\n max_length=50, default=Status.PENDING, choices=Status.choices, verbose_name='响应状态码')\n response_code = models.IntegerField(default=0, verbose_name='响应状态码')\n response_result = models.TextField(default='', verbose_name='回调结果')\n response_time = models.DateTimeField(\n null=True, blank=True, verbose_name='响应时间')\n\n class Meta:\n db_table = 'workflow_workflownodehistorycallback'\n ordering = ['-id']" }, { "identifier": "WorkflowTemplateRevisionHistory", "path": "dbapp/model/model_workflow.py", "snippet": "class WorkflowTemplateRevisionHistory(WorkflowTemplateAbstract):\n \"\"\"\n 工单模板版本历史保存\n 创建工单的时候检查当前模板版本号是否在本模型中存在\n 如果不存在, 从 TicketTemplate 复制一份到这边。\n \"\"\"\n name = models.CharField(max_length=100, verbose_name='工单模板名')\n\n class Meta:\n db_table = 'workflow_workflowtemplaterevisionhistory'" }, { "identifier": "AnsibleApi", "path": "common/utils/AnsibleCallback.py", "snippet": "class AnsibleApi(BaseAnsibleApi):\n def __init__(self, redis_conn, chan, jid, channel, *args, on_any_callback=None, **kwargs):\n super(AnsibleApi, self).__init__(*args, **kwargs)\n\n self.playbook_callback = PlayBookResultsCollector(redis_conn, chan, jid, channel,\n on_any_callback=on_any_callback)\n self.channel = channel\n self.redis_conn = redis_conn\n self.jid = jid" }, { "identifier": "GlueJenkins", "path": "common/utils/JenkinsAPI.py", "snippet": "class GlueJenkins(Jenkins):\n\n def __init__(self, url=None, username=None, password=None):\n self.__url = url\n self.__username = username\n self.__password = password\n super(GlueJenkins, self).__init__(\n self.__url, self.__username, self.__password)\n\n def _get_encoded_params(self, params):\n for k, v in params.items():\n if k in [\"name\", \"msg\", \"short_name\", \"from_short_name\",\n \"to_short_name\", \"folder_url\", \"from_folder_url\", \"to_folder_url\"]:\n params[k] = quote(v.encode('utf8'))\n return params\n\n def _build_url(self, format_spec, variables=None):\n\n if variables:\n url_path = format_spec % self._get_encoded_params(variables)\n else:\n url_path = format_spec\n return str(urljoin(self.server, url_path))\n\n def assert_credential_exists(self, name, folder_name=None, domain_name='_',\n exception_message='credential[%s] does not exist.'):\n 
'''Raise an exception if credential does not exist in domain of folder\n\n :param name: Name of credential, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :param exception_message: Message to use for the exception.\n Formatted with ``name``, ``domain_name``,\n and ``folder_name``\n :throws: :class:`JenkinsException` whenever the credentail\n does not exist in domain of folder\n '''\n if not self.credential_exists(name, folder_name, domain_name):\n raise JenkinsException(exception_message\n % name)\n\n def get_credential_global_config(self, name, domain_name='_'):\n '''Get configuration of credential in domain of folder.\n :param name: Name of credentail, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: Credential configuration (XML format)\n '''\n return self.jenkins_open(requests.Request(\n 'GET', self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n ))\n\n def get_credential_info(self, name, folder_name=None, domain_name='_'):\n '''Get credential information dictionary in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: folder_name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: Dictionary of credential info, ``dict``\n '''\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(CREDENTIAL_INFO_GLOBAL, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('credential[%s] does not exist.' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('credential[%s] does not exist.' % name)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for credential[%s].' % name\n )\n\n def credential_exists(self, name, folder_name=None, domain_name='_'):\n '''Check whether a credentail exists in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: ``True`` if credentail exists, ``False`` otherwise\n '''\n try:\n return self.get_credential_info(name)['id'] == name\n except JenkinsException:\n return False\n\n def create_credential_global(self, name=None, user=None, password=None, secret=None, comment=None, domain_name='_'):\n '''Create credentail in domain of folder\n\n :param name: username\n :param password: password\n :param comment: comment, ``str``\n :param config_xml: New XML configuration, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n '''\n st = shortuuid.ShortUUID()\n st.set_alphabet(\n f\"0123456789{''.join([chr(i) for i in range(ord('a'), ord('z') + 1)])}\")\n if name is None:\n name = '-'.join(['api', st.random(length=8),\n st.random(length=4), st.random(length=12)])\n config_xml = '''<com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <username>%s</username>\n <password>%s</password>\n</com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>''' % (name, comment, user, password)\n if user is None:\n config_xml = '''<org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <secret>%s</secret>\n</org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>''' % (name, comment, secret)\n if self.credential_exists(name):\n raise 
JenkinsException('credential[%s] already exists.' % name)\n\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_CREDENTIAL_GLOBAL, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n self.assert_credential_exists(\n name, exception_message='create credential[%s] failed.')\n return {'status': 0, 'data': name}\n\n def reconfig_credential_global(self, name, user=None, password=None, secret=None, comment=None, domain_name='_'):\n \"\"\"\n Reconfig credential with new config in domain of folder\n :param name: name, ``str``\n :param user:\n :param password:\n :param secret:\n :param comment:\n :param domain_name: Domain name, default is '_', ``str``\n :return:\n \"\"\"\n reconfig_url = self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n config_xml = self.get_credential_global_config(name)\n xml_dict = xmltodict.parse(config_xml)\n if user is None:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['secret'] = secret\n if comment:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['description'] = comment\n else:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['username'] = user\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['password'] = password\n if comment:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl'][\n 'description'] = comment\n config_xml = xmltodict.unparse(xml_dict, pretty=True)\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def create_job(self, name, config_xml):\n '''Create a new Jenkins job\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: config file text, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n if self.job_exists(name):\n raise JenkinsException('job[%s] already exists' % (name))\n\n try:\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_JOB, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n except NotFoundException:\n raise JenkinsException('Cannot create job[%s] because folder '\n 'for the job does not exist' % (name))\n self.assert_job_exists(name, 'create[%s] failed')\n\n def reconfig_job(self, name, config_xml):\n '''Change configuration of existing Jenkins job.\n\n To create a new job, see :meth:`Jenkins.create_job`.\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: New XML configuration, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n reconfig_url = self._build_url(CONFIG_JOB, locals())\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def get_stage_describe(self, name, number, node_number):\n \"\"\" 获取 单个stage 详情 \"\"\"\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_DES, locals())\n ))\n\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_logs(self, name, number, node_number):\n \"\"\" 获取 stage 执行日志\"\"\"\n folder_url, 
short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_LOG, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_info(self, name, number, depth=0):\n\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_INFO, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_flow_detail(self, job_name, build_number):\n stage_data = self.get_stage_info(name=job_name, number=build_number)\n stages = stage_data.get('stages')\n for i in stages:\n logs = ''\n try:\n # 获取stage返回信息\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(i['_links']['self']['href']), locals())\n ))\n if response:\n res = json.loads(response)\n for j in res['stageFlowNodes']:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(j['_links']['log']['href']), locals())\n ))\n res = json.loads(response)\n try:\n # 移除href html信息,保留链接文字\n import re\n pat = re.compile('<a href[^>]*>')\n logs = logs + '\\n' + \\\n pat.sub('', res['text'].replace('</a>', ''))\n except:\n pass\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (job_name, build_number)\n )\n\n stage_data[\"stages\"][stages.index(i)]['logs'] = logs\n return stage_data\n\n def get_queue_item(self, number, depth=0):\n '''Get information about a queued item (to-be-created job).\n\n The returned dict will have a \"why\" key if the queued item is still\n waiting for an executor.\n\n The returned dict will have an \"executable\" key if the queued item is\n running on an executor, or has completed running. Use this to\n determine the job number / URL.\n\n :param name: queue number, ``int``\n :returns: dictionary of queued information, ``dict``\n '''\n url = self._build_url(Q_ITEM, locals())\n try:\n response = self.jenkins_open(requests.Request('GET', url))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('queue number[%d] does not exist'\n % number)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('queue number[%d] does not exist' % number)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for queue number[%d]' % number\n )\n\n def build_job(self, name, parameters=None, token=None):\n '''Trigger build job.\n\n This method returns a queue item number that you can pass to\n :meth:`Jenkins.get_queue_item`. 
Note that this queue number is only\n valid for about five minutes after the job completes, so you should\n get/poll the queue information as soon as possible to determine the\n job's URL.\n\n :param name: name of job\n :param parameters: parameters for job, or ``None``, ``dict``\n :param token: Jenkins API token\n :returns: ``int`` queue item\n '''\n response = self.jenkins_request(requests.Request(\n 'POST', self.build_job_url(name, parameters, token)))\n\n if 'Location' not in response.headers:\n raise EmptyResponseException(\n \"Header 'Location' not found in \"\n \"response from server[%s]\" % self.server)\n\n location = response.headers['Location']\n if location.endswith('/'):\n location = location[:-1]\n parts = location.split('/')\n number = int(parts[-1])\n return number\n\n def get_job_config(self, name):\n '''Get configuration of existing Jenkins job.\n\n :param name: Name of Jenkins job, ``str``\n :returns: job configuration (XML format)\n '''\n folder_url, short_name = self._get_job_folder(name)\n request = requests.Request(\n 'GET', self._build_url(CONFIG_JOB, locals()))\n return self.jenkins_open(request)\n\n def get_job_info(self, name, depth=0, fetch_all_builds=False):\n '''Get job information dictionary.\n\n :param name: Job name, ``str``\n :param depth: JSON depth, ``int``\n :param fetch_all_builds: If true, all builds will be retrieved\n from Jenkins. Otherwise, Jenkins will\n only return the most recent 100\n builds. This comes at the expense of\n an additional API call which may\n return significant amounts of\n data. ``bool``\n :returns: dictionary of job information\n '''\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(JOB_INFO, locals())\n ))\n if response:\n if fetch_all_builds:\n return self._add_missing_builds(json.loads(response))\n else:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] does not exist' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] does not exist' % name)\n except ValueError:\n raise JenkinsException(\n \"Could not parse JSON info for job[%s]\" % name)" }, { "identifier": "HarborAPI", "path": "common/utils/HarborAPI.py", "snippet": "class HarborAPI(object):\n def __init__(self, url, username, password):\n self.__url = url.rstrip('/')\n self.__user = username\n self.__password = password\n self.__token = base64.b64encode(\n bytes('%s:%s' % (self.__user, self.__password), encoding='utf-8'))\n self.__headers = dict()\n self.__headers[\"Accept\"] = \"application/json\"\n self.__headers['authorization'] = 'Basic %s' % str(\n self.__token, encoding='utf-8')\n\n def request(self, method, obj=None, prefix='/'):\n try:\n if method == 'get':\n req = requests.request(method, '%s%s' % (self.__url, prefix), params=obj, headers=self.__headers,\n verify=False)\n if req.status_code > 399:\n return {'ecode': req.status_code, 'message': f'{req.content}\\n{req.reason}'}\n res = {'ecode': req.status_code, 'data': req.json(), 'count': req.headers.get('X-Total-Count', None),\n 'next': req.headers.get('Link', None)}\n if method == 'delete':\n req = requests.request(method, '%s%s' % (\n self.__url, prefix), headers=self.__headers, verify=False)\n if req.status_code > 399:\n return {'ecode': req.status_code, 'message': f'{req.content}\\n{req.reason}'}\n res = {'ecode': req.status_code, 'data': req.content}\n if method in ['put', 'post']:\n req = requests.request(method, '%s%s' % (self.__url, prefix), json=obj, 
headers=self.__headers,\n verify=False)\n if req.status_code > 399:\n return {'ecode': req.status_code, 'message': f'{req.content}\\n{req.reason}'}\n res = {'ecode': req.status_code, 'data': req.content}\n if method == 'head':\n req = requests.request(method, '%s%s' % (\n self.__url, prefix), headers=self.__headers, verify=False)\n if req.status_code > 399:\n return {'ecode': req.status_code, 'message': f'{req.content}\\n{req.reason}'}\n res = {'ecode': req.status_code, 'data': req.content}\n except BaseException as e:\n raise e\n return res\n\n def systeminfo(self):\n res = self.request('get', prefix='/systeminfo')\n return res\n\n def get_users(self):\n res = self.request('get', prefix='/users')\n return res\n\n def get_projects(self, project_name=None, page=1, page_size=20):\n \"\"\"\n :project_name: The name of project\n :page: default is 1.\n :page_size: default is 10, maximum is 100.\n \"\"\"\n params = {'page': page, 'page_size': page_size}\n if project_name:\n params['name'] = project_name\n try:\n res = self.request('get', params, prefix='/projects')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def get_repositories(self, project_id, page=1, page_size=20, repo=None):\n params = {'project_id': project_id,\n 'page': page, 'page_size': page_size}\n if repo:\n params['q'] = repo\n try:\n res = self.request('get', params, '/repositories')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def get_tags(self, repo):\n try:\n res = self.request('get', prefix='/repositories/%s/tags' % repo)\n tags = [\n {'name': i['name'], 'created': i['created'], 'push_time': i.get(\n 'push_time', None), 'size': i['size']}\n for i in\n res['data']]\n tags.sort(key=lambda k: (k.get('created')), reverse=True)\n return {'ecode': 200, 'data': tags, 'count': len(tags)}\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def fetch_project(self, project_id):\n \"\"\"\n 获取项目信息\n \"\"\"\n try:\n res = self.request(\n 'get', {'project_id': project_id}, prefix=f'/projects/{project_id}')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def fetch_tag(self, repo, tag):\n \"\"\"\n 获取指定镜像标签\n \"\"\"\n try:\n res = self.request(\n 'get', prefix=f'/repositories/{repo}/tags/{tag}')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def create_project(self, project_name, public=True):\n \"\"\"\n 创建仓库项目\n \"\"\"\n try:\n data = {'project_name': project_name, 'metadata': {\n 'public': 'true' if public else 'false'}}\n res = self.request('post', obj=data, prefix='/projects')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def update_project(self, project_id, *args, **kwargs):\n \"\"\"\n 更新仓库项目\n \"\"\"\n try:\n res = self.request('put', obj=kwargs,\n prefix=f'/projects/{project_id}')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def project_exists(self, project_name):\n \"\"\"\n 查询项目是否存在\n \"\"\"\n try:\n res = self.request(\n 'head', prefix=f'/projects?project_name={project_name}')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def patch_tag(self, repo, src_image, tag_name):\n \"\"\"\n 镜像打标签\n \"\"\"\n try:\n try:\n # 创建仓库项目\n res = self.create_project(repo.split('/')[0])\n except BaseException as e:\n pass\n data = {'tag': tag_name, 'src_image': src_image, 'override': True}\n res = self.request(\n 'post', obj=data, prefix='/repositories/%s/tags' % repo)\n return 
res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def delete_tag(self, repo, tag):\n \"\"\"\n 删除标签\n \"\"\"\n try:\n res = self.request(\n 'delete', prefix=f'/repositories/{repo}/tags/{tag}')\n return res\n except BaseException as e:\n logger.ex\n return {'ecode': 500, 'message': e}\n\n def search(self, query):\n \"\"\"\n 搜索\n \"\"\"\n try:\n res = self.request('get', {'q': query}, prefix='/search')\n return res\n except BaseException as e:\n logger.exception(e)\n return {'ecode': 500, 'message': e}" }, { "identifier": "RedisManage", "path": "common/utils/RedisAPI.py", "snippet": "class RedisManage(object):\n\n @classmethod\n def conn(cls):\n if REDIS_CLUSTER_CONFIG.get('startup_nodes', None):\n pool = ClusterConnectionPool(startup_nodes=REDIS_CLUSTER_CONFIG['startup_nodes'],\n password=REDIS_CLUSTER_CONFIG.get(\n 'password', ''),\n nodemanager_follow_cluster=True,\n decode_responses=True, )\n return RedisCluster(connection_pool=pool, nodemanager_follow_cluster=True)\n pool = redis.ConnectionPool(host=REDIS_CONFIG['host'], port=REDIS_CONFIG['port'], db=REDIS_CONFIG['db'],\n password=REDIS_CONFIG.get('password', ''), decode_responses=True)\n return redis.Redis(connection_pool=pool)\n\n @staticmethod\n def get_pubsub():\n r = redis.StrictRedis(host=REDIS_CONFIG['host'], port=REDIS_CONFIG['port'], db=REDIS_CONFIG['db'],\n password=REDIS_CONFIG.get('password', ''))\n return r.pubsub(ignore_subscribe_messages=True)" }, { "identifier": "AesCipher", "path": "common/utils/AesCipher.py", "snippet": "class AesCipher(object):\n def __init__(self, secret_key='Devops SecretKey'):\n self.__secret_key = secret_key\n self.__aes = AES.new(str.encode(self.__secret_key), AES.MODE_ECB)\n\n def encrypt(self, data):\n while len(data) % 16 != 0: # 补足字符串长度为16的倍数\n data += (16 - len(data) % 16) * chr(16 - len(data) % 16)\n cipher_data = str(base64.encodebytes(self.__aes.encrypt(str.encode(data))), encoding='utf8').replace('\\n', '')\n return cipher_data\n\n def decrypt(self, cipher_data):\n try:\n decrypted_text = self.__aes.decrypt(base64.decodebytes(bytes(cipher_data, encoding='utf8'))).decode(\"utf8\")\n decrypted_text = decrypted_text[:-ord(decrypted_text[-1])] # 去除多余补位\n return decrypted_text\n except BaseException as e:\n print('data', e)\n raise Exception(e)" }, { "identifier": "OmsMail", "path": "common/MailSend.py", "snippet": "class OmsMail(object):\n def __init__(self):\n self.__email_config = get_redis_data('mail')\n self.__master = None\n if self.__email_config:\n self.__master = self.__email_config.get('user', None)\n self.__url = get_redis_data('platform')['url']\n\n def send_mail(self, title, msg, receiver, is_html=False):\n self.__send_mail(title, msg, receiver, is_html=is_html)\n\n def __send_mail(self, title, msg, receiver, is_html=False):\n \"\"\"\n\n :param title:\n :param msg:\n :param receiver: '[email protected],[email protected]'\n :return:\n \"\"\"\n try:\n html_message = ''\n if is_html:\n html_message = msg\n send_mail(\n f\"{self.__email_config['prefix']}{title}\",\n msg,\n self.__master, receiver.split(','),\n html_message=html_message\n )\n return {'status': 0}\n except Exception as e:\n print('err', e)\n return {'status': 1, 'msg': '发送邮件通知失败 %s' % str(e)}\n\n def ticket_process(self, ticket, title, status, user, receiver):\n msg = f\"Hi {user},\\n你有新的工单{ticket}(标题:{title}){TICKET_STATUS[status]}。\\n请访问{self.__url} 进行处理。\"\n self.__send_mail('工单跟踪', msg, receiver)\n\n def ticket_handle(self, ticket, title, status, user, receiver):\n msg = f\"Hi 
{user},\\n工单{ticket}(标题:{title}){TICKET_STATUS[status]}。\\n请访问{self.__url} 进行处理。\"\n self.__send_mail('工单跟踪', msg, receiver)\n\n def ticket_create(self, ticket, title, status, user, receiver):\n mail_title = '工单处理结果'\n if status == 4:\n mail_title = '工单处理中'\n msg = f\"Hi {user},\\n你的工单{ticket}(标题:{title}){TICKET_STATUS[status]}。\\n请访问{self.__url} 查看更多信息。\"\n self.__send_mail(mail_title, msg, receiver)\n\n def account_register(self, op_user, username, password, user, receiver):\n msg = f\"Hi {user},\\n{op_user}已为你开通平台账号,相关信息如下:\\n用户名:{username}\\n密码:{password}\\n登录地址:{self.__url}。\"\n self.__send_mail('账号开通', msg, receiver)\n\n def deploy_notify(self, title, msg, receiver):\n self.__send_mail(title, msg, receiver)\n\n def test_notify(self, receiver):\n ret = self.__send_mail('邮件测试', \"Hi,如果能看到此邮件,说明平台邮件服务配置成功\", receiver)\n return ret" }, { "identifier": "get_datadict", "path": "common/ext_fun.py", "snippet": "def get_datadict(name, config=0, default_value=None):\n \"\"\"\n 从数据字典获取数据\n \"\"\"\n try:\n qs = DataDict.objects.get(key=name)\n except BaseException as e:\n return default_value\n if config:\n ret = json.loads(qs.extra)\n else:\n ret = {'id': qs.id, 'key': qs.key,\n 'value': qs.value, 'desc': qs.desc}\n return ret" }, { "identifier": "get_datadict", "path": "common/ext_fun.py", "snippet": "def get_datadict(name, config=0, default_value=None):\n \"\"\"\n 从数据字典获取数据\n \"\"\"\n try:\n qs = DataDict.objects.get(key=name)\n except BaseException as e:\n return default_value\n if config:\n ret = json.loads(qs.extra)\n else:\n ret = {'id': qs.id, 'key': qs.key,\n 'value': qs.value, 'desc': qs.desc}\n return ret" }, { "identifier": "get_redis_data", "path": "common/ext_fun.py", "snippet": "def get_redis_data(name):\n ret = cache.get(f\"system:{name}\")\n if not ret:\n try:\n if name == 'cicd-harbor':\n qs = SystemConfig.objects.filter(type=name)[0]\n else:\n qs = SystemConfig.objects.get(name=name)\n except BaseException as e:\n return None\n ret = json.loads(qs.config)\n set_redis_data(name, ret)\n\n return ret" }, { "identifier": "k8s_cli", "path": "common/ext_fun.py", "snippet": "def k8s_cli(k8s, k8s_config):\n try:\n if k8s_config['type'] == 'basic':\n # basic auth or token auth\n k8s_config.pop('config', None)\n k8s_config.pop('type', None)\n cli = K8sAPI(**k8s_config)\n else:\n eks = None\n eks_token = None\n k8s_config = yaml.safe_load(k8s_config['config'])\n if k8s.idc.type == 1 and k8s.idc.supplier.split('.')[-1] == 'aws':\n return False, 'not support.'\n cli = K8sAPI(k8s_config=k8s_config, api_key=eks_token, eks=eks)\n return True, cli\n except BaseException as e:\n return False, str(e)" }, { "identifier": "set_redis_data", "path": "common/ext_fun.py", "snippet": "def set_redis_data(name, config):\n cache.set(f\"system:{name}\", config, None)" }, { "identifier": "template_svc_generate", "path": "common/ext_fun.py", "snippet": "def template_svc_generate(appinfo_obj):\n \"\"\"\n 生成Kubernetes Svc Yaml\n\n ### 格式:\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Service\",\n \"metadata\": {\n \"name\": \"appname\",\n \"namespace\": \"env-product\",\n \"labels\": {\n \"app\": \"appname\"\n }\n },\n \"spec\": {\n \"ports\": [{\n \"port\": 8080,\n \"targetPort\": 8080,\n \"protocol\": \"TCP\",\n \"name\": \"http\"\n }],\n \"selector\": {\n \"app\": \"appname\"\n }\n }\n }\n \"\"\"\n svc_temp = DataDict.objects.filter(key='yaml.svc')\n if svc_temp.exists():\n svc_temp = json.loads(svc_temp.first().extra)\n if appinfo_obj.environment.name in svc_temp:\n svc_temp = 
svc_temp[appinfo_obj.environment.name]\n namespace = appinfo_obj.namespace\n svc_temp['metadata']['name'] = appinfo_obj.app.name\n svc_temp['metadata']['namespace'] = namespace\n svc_temp['metadata']['labels'] = {'app': appinfo_obj.app.name}\n\n labels = []\n labels.extend([{'name': 'app', 'value': appinfo_obj.app.name}])\n\n svc_temp['spec']['selector'] = {\n i['name']: i['value'] for i in labels}\n return True, svc_temp\n return False, None" }, { "identifier": "get_project_mergerequest", "path": "common/ext_fun.py", "snippet": "def get_project_mergerequest(project: Project, cli: GitLabAPI, **params):\n \"\"\"\n 获取项目下所有应用的合并请求\n \"\"\"\n mrdata = []\n git_project = [app.repo['id']\n for app in project.microapp_set.all() if app.repo.get('id')]\n for project_id in set(git_project):\n try:\n git_project = cli.get_project(project_id)\n ok, data = cli.list_mrs(project=git_project, **params)\n if ok is False:\n continue\n mrdata.extend([i.attributes for i in data])\n except BaseException as e:\n logger.error(f'获取应用合并请求异常,原因:{e}')\n return mrdata" }, { "identifier": "convert_xml_to_str_with_pipeline", "path": "common/custom_format.py", "snippet": "def convert_xml_to_str_with_pipeline(xml, url, secret, desc, jenkinsfile, scm=True):\n \"\"\"\n scm\n True: jenkinsfile为指定的git地址\n False: jenkinsfile为具体的pipeline\n \"\"\"\n xml_dict = xmltodict.parse(xml)\n if scm:\n xml_dict['flow-definition']['definition']['@class'] = 'org.jenkinsci.plugins.workflow.cps.CpsScmFlowDefinition'\n xml_dict['flow-definition']['definition']['scm']['userRemoteConfigs']['hudson.plugins.git.UserRemoteConfig'][\n 'url'] = url\n xml_dict['flow-definition']['definition']['scm']['userRemoteConfigs']['hudson.plugins.git.UserRemoteConfig'][\n 'credentialsId'] = secret\n xml_dict['flow-definition']['definition']['scriptPath'] = jenkinsfile\n else:\n xml_dict['flow-definition']['definition']['@class'] = 'org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition'\n xml_dict['flow-definition']['definition']['script'] = jenkinsfile\n xml_dict['flow-definition']['definition']['sandbox'] = 'true'\n xml_dict['flow-definition']['description'] = desc\n result = xmltodict.unparse(\n xml_dict, short_empty_elements=True, pretty=True)\n return result" } ]
from collections import OrderedDict from django.core.cache import cache from django_q.tasks import async_task, AsyncTask, schedule from django_q.models import Schedule from django.db import transaction from django.utils import timezone from celery_tasks.celery import app from channels.layers import get_channel_layer from asgiref.sync import async_to_sync, sync_to_async from channels.db import database_sync_to_async from dbapp.model.model_cmdb import KubernetesDeploy, MicroApp, Project from dbapp.models import KubernetesCluster, Idc, AppInfo, Environment from common.utils.GitLabAPI import GitLabAPI from dbapp.model.model_deploy import BuildJob, DeployJob, PublishApp, PublishOrder, BuildJobResult from dbapp.model.model_ucenter import UserProfile, SystemConfig, Organization, DataDict from dbapp.model.model_workflow import Workflow, WorkflowNodeHistory, WorkflowNodeHistoryCallback, WorkflowTemplateRevisionHistory from workflow.callback_common import callback_work from kubernetes import client, config, watch from kubernetes.client import ApiClient from common.utils.AnsibleCallback import AnsibleApi from common.utils.JenkinsAPI import GlueJenkins from common.utils.HarborAPI import HarborAPI from common.utils.RedisAPI import RedisManage from common.utils.AesCipher import AesCipher from common.MailSend import OmsMail from common.ext_fun import get_datadict, get_datadict, get_redis_data, k8s_cli, set_redis_data, template_svc_generate, \ get_project_mergerequest from common.custom_format import convert_xml_to_str_with_pipeline from common.variables import * from config import FEISHU_URL, MEDIA_ROOT, SOCIAL_AUTH_FEISHU_KEY, SOCIAL_AUTH_FEISHU_SECRET, SOCIAL_AUTH_GITLAB_API_URL from ruamel import yaml import logging import sys import xlsxwriter import asyncio import datetime import json import time import pytz import itertools
21100
ret.append(cli.delete_namespace_deployment( kwargs['app_name'], kwargs['namespace'], api_version)) ret.append(cli.delete_namespace_service( kwargs['app_name'], kwargs['namespace'], api_version)) return ret if resource in ['service', 'services']: ret.append(cli.delete_namespace_service( kwargs['app_name'], kwargs['namespace'], api_version)) return ret if resource == 'configmap': ret.append(cli.delete_namespace_configmap( kwargs['app_name'], kwargs['namespace'], api_version)) return ret @app.task def k8s_service_create(cluster_id, k8s_config, name, targets, namespace='default', service_type='NodePort'): k8s = KubernetesCluster.objects.get(id=cluster_id) cli = k8s_cli(k8s, k8s_config) if not cli[0]: return {'job': '创建Service', 'msg': 'Kubernetes配置异常,请联系运维!'} cli = cli[1] ret = cli.create_namespace_service(name, targets, namespace, service_type) return {'job': '创建Deployment', 'msg': ret} @app.task def watch_k8s_update(k8s_config, name, namespace): config.kube_config.load_kube_config_from_dict(yaml.safe_load(k8s_config)) cli = client.AppsV1beta2Api() count = 200 w = watch.Watch() for event in w.stream(cli.read_namespaced_deployment(name, namespace), timeout_seconds=10): print("Event: %s %s %s" % ( event['type'], event['object'].kind, event['object'].metadata.name), event ) count -= 1 if not count: w.stop() print("Finished pod stream.") @app.task def watch_k8s_deployment(*args): """ 实时获取前后端应用发布中的日志 :param args: :return: """ channel_name = args[0] job_id = args[1] app_id = args[2] channel_layer = get_channel_layer() job = DeployJob.objects.get(id=job_id) redis_conn = RedisManage().conn() _flag = True count = 0 while _flag: app_deploy_stat = cache.get(f'appdeploy:stat:{job.id}') msg = cache.get(f'appdeploy:{job.id}') if not app_deploy_stat: async_to_sync(channel_layer.send)( channel_name, { "type": "send.message", "message": json.dumps(msg) } ) time.sleep(0.5) continue count += 1 async_to_sync(channel_layer.send)( channel_name, { "type": "send.message", "message": isinstance(msg, str) and msg or json.dumps(msg) } ) if count > 5: # 部署结束, 发送 devops-done time.sleep(3) async_to_sync(channel_layer.send)( channel_name, { "type": "send.message", "message": 'devops-done' } ) _flag = False if DeployJob.objects.get(id=job_id).status != 3: cache.delete(f'appdeploy:{job.id}') redis_conn.delete(job.id) @app.task def workflow_email_notice(title, msg, receiver): # 邮件发送 mail = OmsMail() mail.send_mail(title, msg, receiver, is_html=True) logger.debug(f'工单系统 邮件发送 {receiver}') def workflow_callback(callback_type, workflow_node_history_id, workflow_node_history_callback_id, method, url, headers=None, cookies=None, timeout=30): workflow_node_history_obj = WorkflowNodeHistory.objects.get( id=workflow_node_history_id) workflow_node_history_callback_obj = WorkflowNodeHistoryCallback.objects.get( id=workflow_node_history_callback_id) try: workflow_obj = workflow_node_history_obj.workflow first_node_name = workflow_obj.template.nodes[0]['name'] first_node_form = WorkflowNodeHistory.objects.filter( workflow=workflow_obj, node=first_node_name).first().form result = callback_work( callback_type, method, url,
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Author : Charles Lai @Contact : [email protected] @Time : 2020/5/19 下午5:42 @FileName: tasks.py @Company : Vision Fund """ from __future__ import unicode_literals logger = logging.getLogger(__name__) # 机器人类型 ROBOT_CATEGORIES = {} def clean_image_task(*args): """ 调用Harbor API清理镜像 """ appinfo_obj = AppInfo.objects.get(id=args[0]) image = args[1] # 获取镜像保留份数 image_retain = get_datadict('IMAGE_RETAIN', config=1) repo = image.split(':')[0] # 获取app的k8s集合 k8s_cluster = appinfo_obj.kubernetes.all() for k8s in k8s_cluster: try: # 获取idc关联的harbor仓库 harbor = SystemConfig.objects.get(id=k8s.idc.repo) # 获取harbor配置 harbor_config = json.loads(harbor.config) logger.info(f'开始清理仓库{harbor.name}镜像{repo}') # 调用Harbor api推送 cli = HarborAPI(url=harbor_config['ip'] + '/api/', username=harbor_config['user'], password=harbor_config['password']) # 获取镜像标签 res = cli.get_tags(repo) # 默认保留10份镜像版本 _retain = (image_retain.get(appinfo_obj.environment.name.split('_')[-1].lower(), None) if image_retain else 10) or 10 if res['count'] > _retain: # 清理历史版本 for _t in res['data'][_retain:]: cli.delete_tag(repo, _t['name']) except BaseException as e: logger.warning(f'清理Harbor[{repo}]标签异常, 原因: {e}') def docker_image_sync(*args, **kwargs): """ 调用Harbor API同步镜像 """ # 待发版的app数组 apps = kwargs['apps'] for app in apps: appinfo_obj = AppInfo.objects.get(id=app['id']) namespace = appinfo_obj.namespace src_image = app['image']['image'] _image = src_image.split('/')[-1].split(':') image = f"{namespace}/{_image[0]}" tag = _image[1] # 获取app的k8s集合 k8s_cluster = appinfo_obj.kubernetes.all() for k8s in k8s_cluster: # 获取idc关联的harbor仓库 harbor = SystemConfig.objects.get(id=k8s.idc.repo) # 获取harbor配置 harbor_config = json.loads(harbor.config) # 调用Harbor api推送 cli = HarborAPI(url=harbor_config['ip'], username=harbor_config['user'], password=harbor_config['password']) # 检测镜像标签是否存在 res = cli.fetch_tag(image, tag) if res.get('ecode', 500) > 399: # 镜像标签不存在 res = cli.patch_tag(image, src_image, tag) if res.get('ecode', 500) > 399: # 打标签异常 sync_stat = 1 else: if isinstance(res['data'], bytes): res['data'] = res['data'].decode('utf-8') logger.info(f'{image}:{tag}镜像同步结果: {res}') else: logger.info(f'{image}:{tag}镜像存在, 不需要同步.') @app.task def deploy_notify_cron(): """ 部署消息通知定时任务 """ _keys = [v for k, v in cache.get_many(cache.keys(f'{MSG_KEY}*')).items() if isinstance(v, (dict,)) and v.get('msg_key', None)] _keys = list(set([i['msg_key'] for i in _keys])) for _key in _keys: _send = False try: # 判断是否工单, 工单状态是否完成 _check_order = _key.split(MSG_KEY)[1].split(':')[0] if _check_order.isdigit(): # 查询工单状态 if PublishOrder.objects.get(order_id=_check_order).status in [1, 2, 4]: # 标记立即发送 _send = True except BaseException as e: pass _delay = 0.1 delay = cache.get(f"{DELAY_NOTIFY_KEY}{_key}") if delay: _time_diff = (datetime.datetime.now() - delay['curr_time']).seconds # 时间差大于延时则发送消息 if _time_diff >= delay['delay']: _send = True if _send: _qkeys = cache.keys(f"{_key}:*") if _qkeys: msg = cache.get(_qkeys[0]) robot = msg['robot'] title = msg['title'] order_id = msg.get('order_id', None) deploy_notify_queue.apply_async([_key], {'cron': True, 'order_id': order_id, 'robot': robot, 'title': title}, countdown=_delay) def deploy_notify_queue(*args, **kwargs): """ CICD通知队列 """ msg_key = args[0] appid = kwargs.get('appid', None) job_cron = kwargs.get('cron', None) title = kwargs['title'] robot = kwargs['robot'] order_id = kwargs.pop('order_id', None) _keys = sorted(cache.keys(f"{msg_key}:*"), reverse=True) msg = cache.get_many(_keys) 
cache.delete_many(_keys) if msg: async_task(deploy_notify_send, order_id, title, msg, robot) def deploy_notify_send(order_id, title, msg, robot): """ CICD消息发送 """ try: _robot = get_redis_data(robot) robot_notify = ROBOT_CATEGORIES[_robot.get('type', 'dingtalk')]( _robot['webhook'], _robot['key']) content = '\n---\n'.join([v['msg'] for _, v in msg.items()]) recv_phone = ','.join(list( set([v['recv_phone'] for _, v in msg.items() if v.get('recv_phone', None)]))) if _robot.get('type', 'dingtalk') == 'feishu': # 飞书使用open_id at recv_phone = ','.join(list( set([v['recv_openid'] for _, v in msg.items() if v.get('recv_openid', None)]))) if order_id: content += f"\n---\n**工单ID: {order_id}** " notify_result = robot_notify.deploy_notify( content, recv_phone, title=title) if notify_result.get('status', 1) != 0: logger.error(f'部署消息[{title}]通知异常, 原因: {notify_result}') raise Exception(notify_result) logger.info(f'部署消息通知成功: {title} | {recv_phone} | {_robot} | {content}') except BaseException as e: """ 发送通知异常重试 """ logger.error(f'部署消息[{title}]通知异常, 原因: {e}') def test_notify(receiver, notify_type='mail', robot_name=None, robot_webhook=None, robot_key=None, robot_type='dingtalk'): ret = None if notify_type == 'mail': mail_send = OmsMail() ret = mail_send.test_notify(receiver) if notify_type == 'robot': robot_notify = ROBOT_CATEGORIES[robot_type](robot_webhook, robot_key) ret = robot_notify.test_notify(receiver, robot_name) return ret def publishorder_notify(self, *args, **kwargs): """ kwargs: [id, creator, apps, title, order_id, created_time, expect_time] """ microapps = MicroApp.objects.filter( appinfo__id__in=list(set([i['id'] for i in kwargs['apps']]))) try: team_members_id = [] [team_members_id.extend(i.team_members.get('op', [])) for i in microapps] team_members = UserProfile.objects.filter( id__in=list(set(team_members_id))) recv_phone = ','.join(list(set([i.mobile for i in team_members]))) notify = DataDict.objects.get(key='TICKET_NOTIFY') _robot = get_redis_data(notify.value) robot_notify = ROBOT_CATEGORIES[_robot['type']]( _robot['webhook'], _robot['key']) msg = f'''你有新的工单待处理! 
标题: {kwargs['title']} 工单ID: {kwargs['order_id']} 期望发版时间: {kwargs['expect_time']} 创建时间: {kwargs['created_time']} 创建人: {kwargs['creator']} 链接: [{get_redis_data('platform')['url'].strip('/')}/#/deploy/{kwargs['id']}/detail]({get_redis_data('platform')['url'].strip('/')}/#/deploy/{kwargs['id']}/detail) ''' notify_result = robot_notify.deploy_notify( msg, recv_phone, title=kwargs['title']) if notify_result.get('status', 1) != 0: logger.error(f"工单消息[{kwargs['title']}]通知异常, 原因: {notify_result}") raise Exception(notify_result) logger.info( f"部署消息通知成功: {kwargs['title']} | {recv_phone} | {_robot} | {msg}") except BaseException as e: logger.error(f"工单消息[{kwargs['title']}通知异常, 原因: {e}") def k8s_resource_delete(*args, **kwargs): k8s_config = kwargs['config'] resource = kwargs['resource'] api_version = kwargs['apiversion'] cluster_id = kwargs['cluster_id'] k8s = KubernetesCluster.objects.get(id=cluster_id) try: k8s_config = json.loads(k8s_config) cli = k8s_cli(k8s, k8s_config) if not cli[0]: return None cli = cli[1] except BaseException as e: return None ret = [] if resource == 'deployment': ret.append(cli.delete_namespace_deployment( kwargs['app_name'], kwargs['namespace'], api_version)) ret.append(cli.delete_namespace_service( kwargs['app_name'], kwargs['namespace'], api_version)) return ret if resource in ['service', 'services']: ret.append(cli.delete_namespace_service( kwargs['app_name'], kwargs['namespace'], api_version)) return ret if resource == 'configmap': ret.append(cli.delete_namespace_configmap( kwargs['app_name'], kwargs['namespace'], api_version)) return ret @app.task def k8s_service_create(cluster_id, k8s_config, name, targets, namespace='default', service_type='NodePort'): k8s = KubernetesCluster.objects.get(id=cluster_id) cli = k8s_cli(k8s, k8s_config) if not cli[0]: return {'job': '创建Service', 'msg': 'Kubernetes配置异常,请联系运维!'} cli = cli[1] ret = cli.create_namespace_service(name, targets, namespace, service_type) return {'job': '创建Deployment', 'msg': ret} @app.task def watch_k8s_update(k8s_config, name, namespace): config.kube_config.load_kube_config_from_dict(yaml.safe_load(k8s_config)) cli = client.AppsV1beta2Api() count = 200 w = watch.Watch() for event in w.stream(cli.read_namespaced_deployment(name, namespace), timeout_seconds=10): print("Event: %s %s %s" % ( event['type'], event['object'].kind, event['object'].metadata.name), event ) count -= 1 if not count: w.stop() print("Finished pod stream.") @app.task def watch_k8s_deployment(*args): """ 实时获取前后端应用发布中的日志 :param args: :return: """ channel_name = args[0] job_id = args[1] app_id = args[2] channel_layer = get_channel_layer() job = DeployJob.objects.get(id=job_id) redis_conn = RedisManage().conn() _flag = True count = 0 while _flag: app_deploy_stat = cache.get(f'appdeploy:stat:{job.id}') msg = cache.get(f'appdeploy:{job.id}') if not app_deploy_stat: async_to_sync(channel_layer.send)( channel_name, { "type": "send.message", "message": json.dumps(msg) } ) time.sleep(0.5) continue count += 1 async_to_sync(channel_layer.send)( channel_name, { "type": "send.message", "message": isinstance(msg, str) and msg or json.dumps(msg) } ) if count > 5: # 部署结束, 发送 devops-done time.sleep(3) async_to_sync(channel_layer.send)( channel_name, { "type": "send.message", "message": 'devops-done' } ) _flag = False if DeployJob.objects.get(id=job_id).status != 3: cache.delete(f'appdeploy:{job.id}') redis_conn.delete(job.id) @app.task def workflow_email_notice(title, msg, receiver): # 邮件发送 mail = OmsMail() mail.send_mail(title, msg, receiver, is_html=True) 
logger.debug(f'工单系统 邮件发送 {receiver}') def workflow_callback(callback_type, workflow_node_history_id, workflow_node_history_callback_id, method, url, headers=None, cookies=None, timeout=30): workflow_node_history_obj = WorkflowNodeHistory.objects.get( id=workflow_node_history_id) workflow_node_history_callback_obj = WorkflowNodeHistoryCallback.objects.get( id=workflow_node_history_callback_id) try: workflow_obj = workflow_node_history_obj.workflow first_node_name = workflow_obj.template.nodes[0]['name'] first_node_form = WorkflowNodeHistory.objects.filter( workflow=workflow_obj, node=first_node_name).first().form result = callback_work( callback_type, method, url,
template_model_cls=WorkflowTemplateRevisionHistory,
18
2023-12-13 03:09:32+00:00
24k
MarilynKeller/aitviewer-skel
aitviewer/renderables/sdf.py
[ { "identifier": "BoundingBoxes", "path": "aitviewer/renderables/bounding_boxes.py", "snippet": "class BoundingBoxes(Node):\n \"\"\"\n Draw bounding boxes.\n \"\"\"\n\n def __init__(self, vertices, thickness=0.005, color=(0.0, 0.0, 1.0, 1.0), **kwargs):\n \"\"\"\n Initializer.\n :param vertices: Set of 3D coordinates as a np array of shape (N, 8, 3). The vertices will be connected in the\n following way: 0-1-2-3-0 (bottom) 4-5-6-7-4 (top) 0-4 1-5 2-6 3-7 (vertical connections between bottom\n and top).\n :param thickness: Line thickness.\n :param color: Color of the lines.\n \"\"\"\n if not isinstance(vertices, np.ndarray):\n vertices = np.array(vertices)\n if len(vertices.shape) == 2:\n vertices = vertices[np.newaxis]\n else:\n assert len(vertices.shape) == 3\n assert vertices.shape[1] == 8\n super(BoundingBoxes, self).__init__(n_frames=len(vertices), color=color, **kwargs)\n\n self.vertices = vertices\n\n self.lines = Lines(\n lines=self._get_line_coords(),\n mode=\"lines\",\n r_base=thickness,\n color=self.color,\n cast_shadow=False,\n )\n self.spheres = Spheres(positions=self.vertices, radius=thickness, color=self.color, cast_shadow=False)\n self._add_nodes(self.lines, self.spheres, show_in_hierarchy=False)\n\n @property\n def bounds(self):\n return self.get_bounds(self.vertices)\n\n @property\n def current_bounds(self):\n return self.get_bounds(self.vertices[self.current_frame_id])\n\n @staticmethod\n def from_min_max_diagonal(v_min, v_max, **kwargs):\n \"\"\"\n Create an axis-aligned bounding box from the 3D diagonal.\n :param v_min: np array of shape (N, 3).\n :param v_max: np array of shape (N, 3).\n :return: BoundingBoxes corresponding to the given diagonals.\n \"\"\"\n vertices = np.zeros((v_min.shape[0], 8, 3), dtype=v_min.dtype)\n vertices[:, 0:4] = v_min[:, np.newaxis]\n vertices[:, 1, 0] = v_max[:, 0]\n vertices[:, 2, 0:2] = v_max[:, 0:2]\n vertices[:, 3, 1] = v_max[:, 1]\n\n vertices[:, 4:] = v_max[:, np.newaxis]\n vertices[:, 4, 0:2] = v_min[:, 0:2]\n vertices[:, 7, 0] = v_min[:, 0]\n vertices[:, 5, 1] = v_min[:, 1]\n\n return BoundingBoxes(vertices, **kwargs)\n\n def _get_line_coords(self):\n lines = np.zeros((self.n_frames, 12 * 2, 3), dtype=self.vertices.dtype)\n\n # Bottom 0-1-2-3-0.\n lines[:, 0:2] = self.vertices[:, 0:2]\n lines[:, 2:4] = self.vertices[:, 1:3]\n lines[:, 4:6] = self.vertices[:, 2:4]\n lines[:, 6:8] = self.vertices[:, [3, 0]]\n\n # Top 4-5-6-7-4.\n lines[:, 8:10] = self.vertices[:, 4:6]\n lines[:, 10:12] = self.vertices[:, 5:7]\n lines[:, 12:14] = self.vertices[:, 6:8]\n lines[:, 14:16] = self.vertices[:, [7, 4]]\n\n # Vertical Connections.\n lines[:, 16:18] = self.vertices[:, [0, 4]]\n lines[:, 18:20] = self.vertices[:, [1, 5]]\n lines[:, 20:22] = self.vertices[:, [2, 6]]\n lines[:, 22:24] = self.vertices[:, [3, 7]]\n\n return lines\n\n @Node.color.setter\n def color(self, color):\n self.material.color = color\n self.lines.color = color\n self.spheres.color = color" }, { "identifier": "Lines", "path": "aitviewer/renderables/lines.py", "snippet": "class Lines(Node):\n \"\"\"Render lines as cylinders or cones. Can render approx. 600k lines at 40 fps.\"\"\"\n\n def __init__(\n self,\n lines,\n r_base=0.01,\n r_tip=None,\n color=(0.0, 0.0, 1.0, 1.0),\n mode=\"line_strip\",\n cast_shadow=True,\n **kwargs,\n ):\n \"\"\"\n Initializer.\n :param lines: Set of 3D coordinates as a np array of shape (F, L, 3) or (L, 3).\n :param r_base: Thickness of the line.\n :param r_tip: If set, the thickness of the line will taper from r_base to r_tip. 
If set to 0.0 it will create\n a proper cone.\n :param color: Color of the line (4-tuple) or array of color (N_LINES, 4), one for each line.\n :param mode: 'lines' or 'line_strip'.\n 'lines': a line is drawn from point 0 to 1, from 2 to 3, and so on, number of lines is L / 2.\n 'line_strip': a line is drawn between all adjacent points, 0 to 1, 1 to 2 and so on, number of lines is L - 1.\n :param cast_shadow: If True the mesh casts a shadow on other objects.\n \"\"\"\n if len(lines.shape) == 2:\n lines = lines[np.newaxis]\n assert len(lines.shape) == 3\n assert mode == \"lines\" or mode == \"line_strip\"\n if mode == \"lines\":\n assert lines.shape[1] % 2 == 0\n\n self._lines = lines\n self.mode = mode\n self.r_base = r_base\n self.r_tip = r_tip if r_tip is not None else r_base\n\n self.vertices, self.faces = self.get_mesh()\n self.n_lines = self.lines.shape[1] // 2 if mode == \"lines\" else self.lines.shape[1] - 1\n\n # Define a default material in case there is None.\n if isinstance(color, tuple) or len(color.shape) == 1:\n kwargs[\"material\"] = kwargs.get(\"material\", Material(color=color, ambient=0.2))\n self.line_colors = kwargs[\"material\"].color\n else:\n assert (\n color.shape[1] == 4 and color.shape[0] == self.n_lines\n ), \"Color must be a tuple of 4 values or a numpy array of shape (N_LINES, 4)\"\n self.line_colors = color\n\n super(Lines, self).__init__(n_frames=self.lines.shape[0], **kwargs)\n\n self._need_upload = True\n self.draw_edges = False\n\n # Render passes.\n self.outline = True\n self.fragmap = True\n self.depth_prepass = True\n self.cast_shadow = cast_shadow\n\n @property\n def bounds(self):\n bounds = self.get_bounds(self.lines)\n r = max(self.r_base, self.r_tip)\n bounds[:, 0] -= r\n bounds[:, 1] += r\n return bounds\n\n @property\n def current_bounds(self):\n bounds = self.get_bounds(self.current_lines)\n r = max(self.r_base, self.r_tip)\n bounds[:, 0] -= r\n bounds[:, 1] += r\n return bounds\n\n @property\n def lines(self):\n return self._lines\n\n @lines.setter\n def lines(self, value):\n self._lines = value if len(value.shape) == 3 else value[np.newaxis]\n self.n_frames = self.lines.shape[0]\n self.redraw()\n\n @property\n def current_lines(self):\n idx = self.current_frame_id if self._lines.shape[0] > 1 else 0\n return self._lines[idx]\n\n @current_lines.setter\n def current_lines(self, lines):\n assert len(lines.shape) == 2\n idx = self.current_frame_id if self._lines.shape[0] > 1 else 0\n self._lines[idx] = lines\n self.redraw()\n\n @Node.color.setter\n def color(self, color):\n self.material.color = color\n self.line_colors = color\n self.redraw()\n\n @property\n def line_colors(self):\n if len(self._line_colors.shape) == 1:\n t = np.tile(np.array(self._line_colors), (self.n_lines, 1))\n return t\n else:\n return self._line_colors\n\n @line_colors.setter\n def line_colors(self, color):\n if isinstance(color, tuple):\n color = np.array(color)\n self._line_colors = color\n self.redraw()\n\n def on_frame_update(self):\n self.redraw()\n\n def redraw(self, **kwargs):\n self._need_upload = True\n\n @Node.once\n def make_renderable(self, ctx: moderngl.Context):\n self.prog = get_lines_instanced_program()\n\n vs_path = \"lines_instanced_positions.vs.glsl\"\n self.outline_program = get_outline_program(vs_path)\n self.depth_only_program = get_depth_only_program(vs_path)\n self.fragmap_program = get_fragmap_program(vs_path)\n\n self.vbo_vertices = ctx.buffer(self.vertices.astype(\"f4\").tobytes())\n self.vbo_indices = 
ctx.buffer(self.faces.astype(\"i4\").tobytes())\n self.vbo_instance_base = ctx.buffer(reserve=self.n_lines * 12)\n self.vbo_instance_tip = ctx.buffer(reserve=self.n_lines * 12)\n self.vbo_instance_color = ctx.buffer(reserve=self.n_lines * 16)\n\n self.vao = VAO()\n self.vao.buffer(self.vbo_vertices, \"3f4\", \"in_position\")\n self.vao.buffer(self.vbo_instance_base, \"3f4/i\", \"instance_base\")\n self.vao.buffer(self.vbo_instance_tip, \"3f4/i\", \"instance_tip\")\n self.vao.buffer(self.vbo_instance_color, \"4f4/i\", \"instance_color\")\n self.vao.index_buffer(self.vbo_indices)\n\n def _upload_buffers(self):\n if not self.is_renderable or not self._need_upload:\n return\n self._need_upload = False\n\n lines = self.current_lines\n if self.mode == \"lines\":\n v0s = lines[::2]\n v1s = lines[1::2]\n else:\n v0s = lines[:-1]\n v1s = lines[1:]\n\n self.vbo_instance_base.write(v0s.astype(\"f4\").tobytes())\n self.vbo_instance_tip.write(v1s.astype(\"f4\").tobytes())\n\n if len(self._line_colors.shape) > 1:\n self.vbo_instance_color.write(self._line_colors.astype(\"f4\").tobytes())\n\n def render(self, camera, **kwargs):\n self._upload_buffers()\n\n prog = self.prog\n prog[\"r_base\"] = self.r_base\n prog[\"r_tip\"] = self.r_tip\n if len(self._line_colors.shape) == 1:\n prog[\"use_uniform_color\"] = True\n prog[\"uniform_color\"] = tuple(self.color)\n else:\n prog[\"use_uniform_color\"] = False\n prog[\"draw_edges\"].value = 1.0 if self.draw_edges else 0.0\n prog[\"win_size\"].value = kwargs[\"window_size\"]\n prog[\"clip_control\"].value = (0, 0, 0)\n\n self.set_camera_matrices(prog, camera, **kwargs)\n set_lights_in_program(\n prog,\n kwargs[\"lights\"],\n kwargs[\"shadows_enabled\"],\n kwargs[\"ambient_strength\"],\n )\n set_material_properties(prog, self.material)\n self.receive_shadow(prog, **kwargs)\n self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_lines)\n\n def render_positions(self, prog):\n if self.is_renderable:\n self._upload_buffers()\n prog[\"r_base\"] = self.r_base\n prog[\"r_tip\"] = self.r_tip\n self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_lines)\n\n def get_mesh(self):\n v0s = np.array([[0, 0, 0]], np.float32)\n v1s = np.array([[0, 0, 1]], np.float32)\n\n # If r_tip is below a certain threshold, we create a proper cone, i.e. 
with just a single vertex at the top.\n if self.r_tip < 1e-5:\n data = _create_cone_from_to(v0s, v1s, radius=1.0)\n else:\n data = _create_cylinder_from_to(v0s, v1s, radius1=1.0, radius2=1.0)\n\n return data[\"vertices\"][0], data[\"faces\"]\n\n @hooked\n def release(self):\n if self.is_renderable:\n self.vao.release()\n\n def update_frames(self, lines, frames):\n self.lines[frames] = lines\n self.redraw()\n\n def add_frames(self, lines):\n if len(lines.shape) == 2:\n lines = lines[np.newaxis]\n self.lines = np.append(self.lines, lines, axis=0)\n\n def remove_frames(self, frames):\n self.lines = np.delete(self.lines, frames, axis=0)\n self.redraw()\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n name = f\"{self.name}_{self.uid:03}\".replace(\" \", \"_\")\n usd_path = f\"{usd_path}/{name}\"\n\n if self.mode == \"lines\":\n v0s = self.lines[:, ::2]\n v1s = self.lines[:, 1::2]\n else:\n v0s = self.lines[:, :-1]\n v1s = self.lines[:, 1:]\n\n print(self.lines.shape)\n print(v0s.shape)\n\n # Data is in the form of (F, N_LINES, 3), convert it to (F*N_LINES, 3)\n v0s = np.reshape(v0s, (-1, 3))\n v1s = np.reshape(v1s, (-1, 3))\n\n self.r_tip = self.r_base if self.r_tip is None else self.r_tip\n\n # If r_tip is below a certain threshold, we create a proper cone, i.e. with just a single vertex at the top.\n if self.r_tip < 10e-6:\n data = _create_cone_from_to(v0s, v1s, radius=self.r_base)\n else:\n data = _create_cylinder_from_to(v0s, v1s, radius1=self.r_base, radius2=self.r_tip)\n\n L = self.n_lines\n V = data[\"vertices\"].shape[1]\n\n vertices = data[\"vertices\"].reshape((self.n_frames, -1, 3))\n faces = data[\"faces\"]\n\n fs = faces[np.newaxis].repeat(L, 0).reshape((L, -1))\n offsets = (np.arange(L) * V).reshape((L, 1))\n faces = (fs + offsets).reshape((-1, 3))\n\n mesh = usd.add_mesh(stage, usd_path, self.name, vertices, faces, self.get_local_transform())\n usd.add_color(stage, mesh, usd_path, self.color[:3])\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "Meshes", "path": "aitviewer/renderables/meshes.py", "snippet": "class Meshes(Node):\n \"\"\"A sequence of triangle meshes. This assumes that the mesh topology is fixed over the sequence.\"\"\"\n\n def __init__(\n self,\n vertices,\n faces,\n vertex_normals=None,\n face_normals=None,\n vertex_colors=None,\n face_colors=None,\n uv_coords=None,\n path_to_texture=None,\n cast_shadow=True,\n pickable=True,\n flat_shading=False,\n draw_edges=False,\n draw_outline=False,\n instance_transforms=None,\n icon=\"\\u008d\",\n **kwargs,\n ):\n \"\"\"\n Initializer.\n :param vertices: A np array of shape (N, V, 3) or (V, 3).\n :param faces: A np array of shape (F, 3).\n :param vertex_normals: A np array of shape (N, V, 3). If not provided, the vertex normals will be computed,\n which incurs some overhead.\n :param face_normals: A np array of shape (N, F, 3). 
If not provided, the face normals will be computed, which\n incurs some overhead.\n :param vertex_colors: A np array of shape (N, V, 4) overriding the uniform color.\n :param face_colors: A np array of shape (N, F, 4) overriding the uniform or vertex colors.\n :param uv_coords: A np array of shape (V, 2) if the mesh is to be textured.\n :param path_to_texture: Path to an image file that serves as the texture.\n :param cast_shadow: If True the mesh casts a shadow on other objects.\n :param pickable: If True the mesh can be selected with a mouse click.\n :param flat_shading: If True the each face of the mesh is shaded with a constant normal.\n :param draw_edges: If True the normals the edges of the mesh is drawn on top of the mesh.\n :param draw_outline: If true an outline is drawn around the mesh.\n :instance_transforms: np array of size (N, I, 4, 4) or (I, 4, 4) or None. If not None, 'I' instances of\n the same mesh will be rendered, each with its own transformation matrix.\n \"\"\"\n if len(vertices.shape) == 2 and vertices.shape[-1] == 3:\n vertices = vertices[np.newaxis]\n assert len(vertices.shape) == 3\n assert len(faces.shape) == 2\n n_frames = vertices.shape[0]\n\n # Instancing.\n if instance_transforms is not None:\n # Check shape of transforms.\n if len(instance_transforms.shape) == 3:\n instance_transforms = instance_transforms[np.newaxis]\n assert len(instance_transforms.shape) == 4\n\n # Number of instance frames must match number of frames or be 1.\n assert n_frames == 1 or instance_transforms.shape[0] == 1 or n_frames == instance_transforms.shape[0]\n n_frames = max(n_frames, instance_transforms.shape[0])\n\n self._instance_transforms = instance_transforms\n else:\n self._instance_transforms = None\n\n super(Meshes, self).__init__(n_frames=n_frames, icon=icon, **kwargs)\n\n self._vertices = vertices\n self._faces = faces.astype(np.int32)\n\n # Create these first because other setters can call redraw() which uses this fields.\n self._face_colors = None\n self._vertex_colors = None\n self._has_transparent_vertex_or_face_colors = False\n\n def _maybe_unsqueeze(x):\n return x[np.newaxis] if x is not None and x.ndim == 2 else x\n\n self._vertex_normals = _maybe_unsqueeze(vertex_normals)\n self._face_normals = _maybe_unsqueeze(face_normals)\n self.vertex_colors = _maybe_unsqueeze(vertex_colors)\n self.face_colors = _maybe_unsqueeze(face_colors)\n\n # Texture handling.\n self.has_texture = (uv_coords is not None) and (path_to_texture is not None)\n self.uv_coords = uv_coords\n self.texture_path = path_to_texture\n\n if self.has_texture:\n self.use_pickle_texture = path_to_texture.endswith((\".pickle\", \"pkl\"))\n if self.use_pickle_texture:\n self.texture_image = pickle.load(open(path_to_texture, \"rb\"))\n else:\n self.texture_image = Image.open(path_to_texture).transpose(method=Image.FLIP_TOP_BOTTOM).convert(\"RGB\")\n else:\n self.texture_image = None\n\n # Enable rendering passes\n self.cast_shadow = cast_shadow\n self.fragmap = pickable\n self.depth_prepass = True\n self.outline = True\n\n # Misc.\n self._flat_shading = flat_shading\n self.draw_edges = draw_edges\n self.draw_outline = draw_outline\n self.show_texture = self.has_texture\n self.norm_coloring = False\n self.normals_r = None\n self.need_upload = True\n self._use_uniform_color = self._vertex_colors is None and self._face_colors is None\n self._vertex_faces_sparse = trimesh.geometry.index_sparse(self._vertices.shape[1], self._faces)\n\n self.clip_control = np.array((0, 0, 0), np.int32)\n self.clip_value = 
np.array((0, 0, 0), np.float32)\n\n @classmethod\n def instanced(cls, *args, positions=None, rotations=None, scales=None, **kwargs):\n \"\"\"\n Creates and returns an instanced sequence of N frames and I instances.\n Each instance will have its own position, rotation and scale.\n :param positions: np array of size (N, I, 3) or (I, 3) or None.\n :param rotations: np array of size (N, I, 3, 3) or (I, 3, 3) or None.\n :param scales: np array of size (N, I) or (I) or None.\n\n *args, and **kwargs are forwarded to the Meshes constructor.\n \"\"\"\n assert positions is not None or rotations is not None or scales is not None\n\n n_instances = 0\n n_frames = 0\n\n def check_array(a, dim):\n nonlocal n_instances, n_frames\n if a is not None:\n if len(a.shape) == dim + 1:\n a = a[np.newaxis]\n n_frames = max(n_frames, a.shape[0])\n n_instances = max(n_instances, a.shape[1])\n return a\n\n positions = check_array(positions, 1)\n rotations = check_array(rotations, 2)\n scales = check_array(scales, 0)\n\n if positions is None:\n positions = np.zeros((n_frames, n_instances, 3))\n if rotations is None:\n rotations = np.zeros((n_frames, n_instances, 3, 3))\n rotations[:, :] = np.eye(3)\n if scales is None:\n scales = np.ones((n_frames, n_instances))\n\n transforms = np.zeros((n_frames, n_instances, 4, 4))\n transforms[:, :, :3, :3] = (rotations.reshape((-1, 9)) * scales.reshape((-1, 1))).reshape(\n (n_frames, n_instances, 3, 3)\n )\n transforms[:, :, :3, 3] = positions\n transforms[:, :, 3, 3] = 1.0\n return cls(*args, **kwargs, instance_transforms=transforms)\n\n @classmethod\n def from_file(cls, file, **kwargs):\n \"\"\"\n Loads a mesh from a file that can be loaded by trimesh (e.g. \".obj\", \".ply\", ...)\n See trimesh.available_formats() for a complete list.\n \"\"\"\n mesh = trimesh.load(file)\n\n uvs = None\n vertex_colors = None\n face_colors = None\n if isinstance(mesh.visual, trimesh.visual.ColorVisuals):\n if mesh.visual.kind == \"vertex_colors\":\n vertex_colors = mesh.visual.vertex_colors\n elif mesh.visual.kind == \"face_colors\":\n face_colors = mesh.visual.vertex_colors\n elif isinstance(mesh.visual, trimesh.visual.TextureVisuals):\n uvs = mesh.visual.uv\n\n return Meshes(\n mesh.vertices,\n mesh.faces,\n vertex_normals=mesh.vertex_normals,\n face_colors=face_colors,\n vertex_colors=vertex_colors,\n uv_coords=uvs,\n **kwargs,\n )\n\n @property\n def vertices(self):\n return self._vertices\n\n @vertices.setter\n def vertices(self, vertices):\n if len(vertices.shape) == 2:\n vertices = vertices[np.newaxis]\n\n # Update vertices and redraw\n self._vertices = vertices\n self.n_frames = len(vertices)\n\n # If vertex or face normals were supplied, they are no longer valid.\n self._vertex_normals = None\n self._face_normals = None\n\n # Must clear all LRU caches where the vertices are used.\n self.compute_vertex_and_face_normals.cache_clear()\n\n self.redraw()\n\n @property\n def faces(self):\n return self._faces\n\n @faces.setter\n def faces(self, f):\n self._faces = f.astype(np.int32)\n self._vertex_faces_sparse = trimesh.geometry.index_sparse(self.vertices.shape[1], self._faces)\n\n @property\n def current_vertices(self):\n idx = self.current_frame_id if self.vertices.shape[0] > 1 else 0\n return self.vertices[idx]\n\n @current_vertices.setter\n def current_vertices(self, vertices):\n idx = self.current_frame_id if self.vertices.shape[0] > 1 else 0\n self._vertices[idx] = vertices\n self.compute_vertex_and_face_normals.cache_clear()\n self.redraw()\n\n @property\n def 
current_transformed_vertices(self):\n return (self.current_vertices @ self.model_matrix[:3, :3].T) + self.model_matrix[:3, 3]\n\n @property\n def transformed_vertices(self):\n return (self.vertices @ self.model_matrix[:3, :3].T) + self.model_matrix[:3, 3]\n\n @property\n def n_faces(self):\n return self.faces.shape[0]\n\n @property\n def n_vertices(self):\n return self.vertices.shape[1]\n\n @property\n def vertex_faces(self):\n # To compute the normals we need to know a mapping from vertex ID to all faces that this vertex is part of.\n # Because we are lazy we abuse trimesh to compute this for us. Not all vertices have the maximum degree, so\n # this array is padded with -1 if necessary.\n return trimesh.Trimesh(self.vertices[0], self.faces, process=False).vertex_faces\n\n @property\n def vertex_normals(self):\n \"\"\"Get or compute all vertex normals (this might take a while for long sequences).\"\"\"\n if self._vertex_normals is None:\n vertex_normals, _ = compute_vertex_and_face_normals_sparse(\n self.vertices, self.faces, self._vertex_faces_sparse, normalize=True\n )\n self._vertex_normals = vertex_normals\n return self._vertex_normals\n\n @property\n def face_normals(self):\n \"\"\"Get or compute all face normals (this might take a while for long sequences).\"\"\"\n if self._face_normals is None:\n _, face_normals = compute_vertex_and_face_normals_sparse(\n self.vertices, self.faces, self._vertex_faces_sparse, normalize=True\n )\n self._face_normals = face_normals\n return self._face_normals\n\n def vertex_normals_at(self, frame_id):\n \"\"\"Get or compute the vertex normals at the given frame.\"\"\"\n if self._vertex_normals is None:\n vn, _ = self.compute_vertex_and_face_normals(frame_id, normalize=True)\n else:\n assert len(self._vertex_normals.shape) == 3, f\"Got shape {self._vertex_normals.shape}\"\n vn = self._vertex_normals[frame_id]\n return vn\n\n def face_normals_at(self, frame_id):\n \"\"\"Get or compute the face normals at the given frame.\"\"\"\n if self._face_normals is None:\n _, fn = self.compute_vertex_and_face_normals(frame_id, normalize=True)\n else:\n assert len(self._face_normals.shape) == 3, f\"Got shape {self._face_normals.shape}\"\n fn = self._face_normals[frame_id]\n return fn\n\n @property\n def vertex_colors(self):\n if self._vertex_colors is None:\n self._vertex_colors = np.full((self.n_frames, self.n_vertices, 4), self.material.color)\n return self._vertex_colors\n\n @vertex_colors.setter\n def vertex_colors(self, vertex_colors):\n # If vertex_colors are None, we resort to the material color.\n if vertex_colors is None:\n self._vertex_colors = None\n self._use_uniform_color = True\n elif isinstance(vertex_colors, tuple) and len(vertex_colors) == 4:\n self.vertex_colors = None\n self._use_uniform_color = True\n self.material.color = vertex_colors\n else:\n if len(vertex_colors.shape) == 2:\n assert vertex_colors.shape[0] == self.n_vertices\n vertex_colors = np.repeat(vertex_colors[np.newaxis], self.n_frames, axis=0)\n assert len(vertex_colors.shape) == 3\n self._vertex_colors = vertex_colors\n self._use_uniform_color = False\n self.redraw()\n\n @property\n def current_vertex_colors(self):\n if self._use_uniform_color:\n return np.full((self.n_vertices, 4), self.material.color)\n else:\n idx = self.current_frame_id if self.vertex_colors.shape[0] > 1 else 0\n return self.vertex_colors[idx]\n\n @property\n def face_colors(self):\n return self._face_colors\n\n @face_colors.setter\n def face_colors(self, face_colors):\n if face_colors is not None:\n if 
len(face_colors.shape) == 2:\n face_colors = face_colors[np.newaxis]\n self._face_colors = face_colors\n self._use_uniform_color = False\n else:\n self._face_colors = None\n self.redraw()\n\n @property\n def current_face_colors(self):\n if self._use_uniform_color:\n return np.full((self.n_faces, 4), self.material.color)\n else:\n idx = self.current_frame_id if self.face_colors.shape[0] > 1 else 0\n return self.face_colors[idx]\n\n @Node.color.setter\n def color(self, color):\n self.material.color = color\n\n if self.face_colors is None:\n self.vertex_colors = color\n\n @property\n def flat_shading(self):\n return self._flat_shading\n\n @flat_shading.setter\n def flat_shading(self, flat_shading):\n if self._flat_shading != flat_shading:\n self._flat_shading = flat_shading\n self.redraw()\n\n def closest_vertex_in_triangle(self, tri_id, point):\n face_vertex_id = np.linalg.norm((self.current_vertices[self.faces[tri_id]] - point), axis=-1).argmin()\n return self.faces[tri_id][face_vertex_id]\n\n def get_bc_coords_from_points(self, tri_id, points):\n return points_to_barycentric(self.current_vertices[self.faces[[tri_id]]], points)[0]\n\n @lru_cache(2048)\n def compute_vertex_and_face_normals(self, frame_id, normalize=False):\n \"\"\"\n Compute face and vertex normals for the given frame. We use an LRU cache since this is a potentially\n expensive operation. This function exists because computing the normals on all frames can increase the\n startup time of the viewer considerably.\n\n :param frame_id: On which frame to compute the normals.\n :param normalize: Whether or not to normalize the normals. Not doing it is faster and the shaders typically\n enforce unit length of normals anyway.\n :return: The vertex and face normals as a np arrays of shape (V, 3) and (F, 3) respectively.\n \"\"\"\n vs = self.vertices[frame_id : frame_id + 1] if self.vertices.shape[0] > 1 else self.vertices\n vn, fn = compute_vertex_and_face_normals_sparse(vs, self.faces, self._vertex_faces_sparse, normalize)\n return vn.squeeze(0), fn.squeeze(0)\n\n @property\n def bounds(self):\n if self.instance_transforms is None:\n return self.get_bounds(self.vertices)\n else:\n # Get bounds in local coordinates\n bounds = self.get_local_bounds(self.vertices)\n\n # Transform bounds with instance transforms\n min = np.append(bounds[:, 0], 1.0)\n max = np.append(bounds[:, 1], 1.0)\n transforms = self.instance_transforms.reshape((-1, 4, 4))\n mins = transforms @ min\n maxs = transforms @ max\n\n # Return bounds in world coordinates\n return self.get_bounds(np.vstack((mins, maxs)))\n\n @property\n def current_bounds(self):\n if self.instance_transforms is None:\n return self.get_bounds(self.current_vertices)\n else:\n # Get bounds in local coordinates\n bounds = self.get_local_bounds(self.current_vertices)\n\n # Transform bounds with instance transforms\n min = np.append(bounds[:, 0], 1.0)\n max = np.append(bounds[:, 1], 1.0)\n transforms = self.current_instance_transforms.reshape((-1, 4, 4))\n mins = transforms @ min\n maxs = transforms @ max\n\n # Return bounds in world coordinates\n return self.get_bounds(np.vstack((mins[:, :3], maxs[:, :3])))\n\n def is_transparent(self):\n return self.color[3] < 1.0 or self._has_transparent_vertex_or_face_colors\n\n def on_frame_update(self):\n \"\"\"Called whenever a new frame must be displayed.\"\"\"\n super().on_frame_update()\n self.redraw()\n\n @property\n def current_instance_transforms(self):\n if self._instance_transforms is None:\n return None\n idx = self.current_frame_id if 
self._instance_transforms.shape[0] > 1 else 0\n return self._instance_transforms[idx]\n\n @property\n def instance_transforms(self):\n return self._instance_transforms\n\n @instance_transforms.setter\n def instance_transforms(self, instance_transforms):\n assert self._instance_transforms.shape == instance_transforms\n self._instance_transforms = instance_transforms\n\n @property\n def n_instances(self):\n if self._instance_transforms is None:\n return 1\n else:\n return self._instance_transforms.shape[1]\n\n def _upload_buffers(self):\n \"\"\"Upload the current frame data to the GPU for rendering.\"\"\"\n if not self.is_renderable or not self._need_upload:\n return\n\n self._need_upload = False\n\n # Write positions.\n self.vbo_vertices.write(self.current_vertices.astype(\"f4\").tobytes())\n\n # Write normals.\n if not self.flat_shading:\n vertex_normals = self.vertex_normals_at(self.current_frame_id)\n self.vbo_normals.write(vertex_normals.astype(\"f4\").tobytes())\n\n if self.face_colors is None:\n # Write vertex colors.\n self.vbo_colors.write(self.current_vertex_colors.astype(\"f4\").tobytes())\n else:\n # Write face colors.\n\n # Compute shape of 2D texture.\n shape = (min(self.faces.shape[0], 8192), (self.faces.shape[0] + 8191) // 8192)\n\n # Write texture left justifying the buffer to fill the last row of the texture.\n self.face_colors_texture.write(\n self.current_face_colors.astype(\"f4\").tobytes().ljust(shape[0] * shape[1] * 16)\n )\n\n # Write uvs.\n if self.has_texture:\n self.vbo_uvs.write(self.uv_coords.astype(\"f4\").tobytes())\n\n # Write instance transforms.\n if self.instance_transforms is not None:\n self.vbo_instance_transforms.write(\n np.transpose(self.current_instance_transforms.astype(\"f4\"), (0, 2, 1)).tobytes()\n )\n\n @hooked\n def redraw(self, **kwargs):\n self._need_upload = True\n\n transparent = False\n if self._vertex_colors is not None:\n transparent = transparent or np.any(self.vertex_colors[:, :, 3] < 1.0)\n if self._face_colors is not None:\n transparent = transparent or np.any(self.face_colors[:, :, 3] < 1.0)\n\n self._has_transparent_vertex_or_face_colors = transparent\n\n def _load_programs(self, vs, positions_vs):\n instanced = 1 if self.instance_transforms is not None else 0\n self.smooth_prog = get_smooth_lit_with_edges_program(vs, instanced)\n self.flat_prog = get_flat_lit_with_edges_program(vs, instanced)\n self.smooth_face_prog = get_smooth_lit_with_edges_face_color_program(vs, instanced)\n self.flat_face_prog = get_flat_lit_with_edges_face_color_program(vs, instanced)\n\n self.depth_only_program = get_depth_only_program(positions_vs, instanced)\n self.outline_program = get_outline_program(positions_vs, instanced)\n self.fragmap_program = get_fragmap_program(positions_vs, instanced)\n\n # noinspection PyAttributeOutsideInit\n @Node.once\n def make_renderable(self, ctx: moderngl.Context):\n \"\"\"Prepares this object for rendering. 
This function must be called before `render` is used.\"\"\"\n vs = \"lit_with_edges.glsl\"\n positions_vs = \"mesh_positions.vs.glsl\"\n self._load_programs(vs, positions_vs)\n\n vertices = self.current_vertices\n vertex_normals = self.vertex_normals_at(self.current_frame_id)\n vertex_colors = self.current_vertex_colors\n\n self.vbo_vertices = ctx.buffer(vertices.astype(\"f4\").tobytes())\n self.vbo_normals = ctx.buffer(vertex_normals.astype(\"f4\").tobytes())\n self.vbo_colors = ctx.buffer(vertex_colors.astype(\"f4\").tobytes())\n self.vbo_indices = ctx.buffer(self.faces.tobytes())\n\n self.vao = VAO()\n self.vao.buffer(self.vbo_vertices, \"3f4\", \"in_position\")\n self.vao.buffer(self.vbo_normals, \"3f4\", \"in_normal\")\n self.vao.buffer(self.vbo_colors, \"4f4\", \"in_color\")\n self.vao.index_buffer(self.vbo_indices)\n\n if self.instance_transforms is not None:\n self.vbo_instance_transforms = ctx.buffer(\n np.transpose(self.current_instance_transforms.astype(\"f4\"), (0, 2, 1)).tobytes()\n )\n self.vao.buffer(self.vbo_instance_transforms, \"16f4/i\", \"instance_transform\")\n\n # Compute shape of 2D texture.\n shape = (min(self.faces.shape[0], 8192), (self.faces.shape[0] + 8191) // 8192)\n self.face_colors_texture = ctx.texture(shape, 4, dtype=\"f4\")\n if self.face_colors is not None:\n # Write texture left justifying the buffer to fill the last row of the texture.\n self.face_colors_texture.write(\n self.current_face_colors.astype(\"f4\").tobytes().ljust(shape[0] * shape[1] * 16)\n )\n\n if self.has_texture:\n img = self.texture_image\n if self.use_pickle_texture:\n self.texture = ctx.texture(img.shape[:2], img.shape[2], img.tobytes())\n else:\n self.texture = ctx.texture(img.size, 3, img.tobytes())\n self.texture_prog = get_smooth_lit_texturized_program(vs)\n self.vbo_uvs = ctx.buffer(self.uv_coords.astype(\"f4\").tobytes())\n self.vao.buffer(self.vbo_uvs, \"2f4\", \"in_uv\")\n\n @hooked\n def release(self):\n if self.is_renderable:\n self.vao.release()\n if self.has_texture:\n self.texture.release()\n\n def _use_program(self, camera, **kwargs):\n if self.has_texture and self.show_texture:\n prog = self.texture_prog\n prog[\"diffuse_texture\"] = 0\n self.texture.use(0)\n else:\n if self.face_colors is None:\n if self.flat_shading:\n prog = self.flat_prog\n else:\n prog = self.smooth_prog\n else:\n if self.flat_shading:\n prog = self.flat_face_prog\n else:\n prog = self.smooth_face_prog\n self.face_colors_texture.use(0)\n prog[\"face_colors\"] = 0\n prog[\"norm_coloring\"].value = self.norm_coloring\n\n prog[\"use_uniform_color\"] = self._use_uniform_color\n prog[\"uniform_color\"] = self.material.color\n prog[\"draw_edges\"].value = 1.0 if self.draw_edges else 0.0\n prog[\"win_size\"].value = kwargs[\"window_size\"]\n\n prog[\"clip_control\"].value = tuple(self.clip_control)\n prog[\"clip_value\"].value = tuple(self.clip_value)\n\n self.set_camera_matrices(prog, camera, **kwargs)\n set_lights_in_program(\n prog,\n kwargs[\"lights\"],\n kwargs[\"shadows_enabled\"],\n kwargs[\"ambient_strength\"],\n )\n set_material_properties(prog, self.material)\n self.receive_shadow(prog, **kwargs)\n return prog\n\n def render(self, camera, **kwargs):\n self._upload_buffers()\n prog = self._use_program(camera, **kwargs)\n self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_instances)\n\n def render_positions(self, prog):\n if self.is_renderable:\n self._upload_buffers()\n\n prog[\"clip_control\"].value = tuple(self.clip_control)\n prog[\"clip_value\"].value = tuple(self.clip_value)\n\n 
self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_instances)\n\n def _show_normals(self):\n \"\"\"Create and add normals at runtime\"\"\"\n vn = self.vertex_normals\n\n bounds = self.bounds\n diag = np.linalg.norm(bounds[:, 0] - bounds[:, 1])\n\n length = 0.005 * max(diag, 1) / self.scale\n vn = vn / np.linalg.norm(vn, axis=-1, keepdims=True) * length\n\n # Must import here because if we do it at the top we create a circular dependency.\n from aitviewer.renderables.arrows import Arrows\n\n positions = self.vertices\n self.normals_r = Arrows(\n positions,\n positions + vn,\n r_base=length / 10,\n r_head=2 * length / 10,\n p=0.25,\n name=\"Normals\",\n )\n self.normals_r.current_frame_id = self.current_frame_id\n self.add(self.normals_r)\n\n def gui(self, imgui):\n super(Meshes, self).gui(imgui)\n\n _, self.show_texture = imgui.checkbox(\n \"Render Texture##render_texture{}\".format(self.unique_name),\n self.show_texture,\n )\n _, self.norm_coloring = imgui.checkbox(\n \"Norm Coloring##norm_coloring{}\".format(self.unique_name),\n self.norm_coloring,\n )\n _, self.flat_shading = imgui.checkbox(\n \"Flat shading [F]##flat_shading{}\".format(self.unique_name),\n self.flat_shading,\n )\n _, self.draw_edges = imgui.checkbox(\"Draw edges [E]##draw_edges{}\".format(self.unique_name), self.draw_edges)\n _, self.draw_outline = imgui.checkbox(\n \"Draw outline##draw_outline{}\".format(self.unique_name), self.draw_outline\n )\n\n if self.normals_r is None:\n if imgui.button(\"Show Normals ##show_normals{}\".format(self.unique_name)):\n self._show_normals()\n\n def gui_context_menu(self, imgui, x: int, y: int):\n _, self.flat_shading = imgui.menu_item(\"Flat shading\", \"F\", selected=self.flat_shading, enabled=True)\n _, self.draw_edges = imgui.menu_item(\"Draw edges\", \"E\", selected=self.draw_edges, enabled=True)\n _, self.draw_outline = imgui.menu_item(\"Draw outline\", selected=self.draw_outline)\n\n imgui.spacing()\n imgui.separator()\n imgui.spacing()\n super().gui_context_menu(imgui, x, y)\n\n def gui_io(self, imgui):\n if imgui.button(\"Export OBJ##export_{}\".format(self.unique_name)):\n mesh = trimesh.Trimesh(vertices=self.current_vertices, faces=self.faces, process=False)\n mesh.export(\"../export/\" + self.name + \".obj\")\n\n def key_event(self, key, wnd_keys):\n if key == wnd_keys.F:\n self.flat_shading = not self.flat_shading\n elif key == wnd_keys.E:\n self.draw_edges = not self.draw_edges\n\n def update_frames(self, vertices, frames):\n self.vertices[frames] = vertices\n self.redraw()\n\n def add_frames(self, vertices):\n if len(vertices.shape) == 2:\n vertices = vertices[np.newaxis]\n self.vertices = np.append(self.vertices, vertices, axis=0)\n self.n_frames = max(self.n_frames, self.vertices.shape[0])\n\n def remove_frames(self, frames):\n self.vertices = np.delete(self.vertices, frames, axis=0)\n self.redraw()\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n name = f\"{self.name}_{self.uid:03}\".replace(\" \", \"_\")\n usd_path = f\"{usd_path}/{name}\"\n\n mesh = usd.add_mesh(stage, usd_path, self.name, self.vertices, self.faces, self.get_local_transform())\n if self.has_texture and not self.use_pickle_texture:\n # UVs.\n a_uv = UsdGeom.PrimvarsAPI(mesh).CreatePrimvar(\n \"st\", Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.faceVarying\n )\n a_uv.Set(time=1, value=self.uv_coords[self.faces.flatten()])\n\n if not directory:\n texture_path = os.path.abspath(self.texture_path)\n else:\n texture_path = 
usd.copy_texture(self.texture_path, name, directory)\n usd.add_texture(stage, mesh, usd_path, texture_path)\n else:\n # NOTE: Per vertex and per face colors using usd displayColor are not currently\n # loaded by Blender. This code path can be enabled once support is there.\n if False:\n a_colors = mesh.GetDisplayColorAttr()\n if self._face_colors is not None:\n # Per face colors.\n if self._face_colors.shape[0] == 1:\n a_colors.Set(self._face_colors[0, :, :3].astype(np.float32))\n else:\n for i in range(self.n_frames):\n a_colors.Set(time=i + 1, value=self._face_colors[i, :, :3].astype(np.float32))\n elif self._vertex_colors is not None:\n # Per vertex colors.\n if self._vertex_colors.shape[0] == 1:\n a_colors.Set(self._vertex_colors[0, :, :3].astype(np.float32))\n else:\n for i in range(self.n_frames):\n a_colors.Set(time=i + 1, value=self._vertex_colors[i, :, :3].astype(np.float32))\n else:\n # Uniform color.\n a_colors.Set(np.array(self.color, np.float32)[:3])\n else:\n usd.add_color(stage, mesh, usd_path, self.color[:3])\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "Node", "path": "aitviewer/scene/node.py", "snippet": "class Node(object):\n \"\"\"Interface for nodes.\"\"\"\n\n def __init__(\n self,\n name=None,\n icon=None,\n position=None,\n rotation=None,\n scale=1.0,\n color=(0.5, 0.5, 0.5, 1.0),\n material=None,\n is_selectable=True,\n gui_affine=True,\n gui_material=True,\n enabled_frames=None,\n n_frames=1,\n ):\n \"\"\"\n :param name: Name of the node\n :param icon: Custom Node Icon using custom Icon font\n :param position: Starting position in the format (X,Y,Z) or np array of positions with shape (F, 3)\n :param rotation: Starting rotation in rotation matrix representation (3,3) or np array of rotations with shape (F, 3, 3)\n :param scale: Starting scale (scalar) or np array of scale values with shape (F)\n :param color: (R,G,B,A) 0-1 formatted color value.\n :param material: Object material properties. 
The color specified in the material will override node color\n :param is_selectable: If True the node is selectable when clicked on, otherwise the parent node will be selected.\n :param gui_affine: If True the node will have transform controls (position, rotation, scale) in the GUI.\n :param gui_material: If True the node will have material controls in the GUI.\n :param enabled_frames: Numpy array of boolean values, the object will be enabled only in frames where the value is True,\n the number of ones in the mask must match the number of frames of the object.\n :param n_frames: How many frames this renderable has.\n \"\"\"\n # Transform & Animation\n position = np.zeros(3, dtype=np.float32) if position is None else np.array(position, dtype=np.float32)\n rotation = np.eye(3, dtype=np.float32) if rotation is None else np.array(rotation, dtype=np.float32)\n\n self._positions = position if len(position.shape) != 1 else position[np.newaxis]\n self._rotations = rotation if len(rotation.shape) != 2 else rotation[np.newaxis]\n self._scales = (scale if isinstance(scale, np.ndarray) else np.array([scale])).astype(np.float32)\n\n n_positions = self._positions.shape[0]\n n_rotations = self._rotations.shape[0]\n n_scales = self._scales.shape[0]\n\n if n_frames > 1:\n assert n_positions == 1 or n_frames == n_positions, (\n f\"Number of position frames\" f\" ({n_positions}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_rotations == 1 or n_frames == n_rotations, (\n f\"Number of rotations frames\" f\" ({n_rotations}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_scales == 1 or n_frames == n_scales, (\n f\"Number of scales frames\" f\" ({n_scales}) must be 1 or match number of Node frames {n_frames}\"\n )\n else:\n n_frames = max(n_positions, n_rotations, n_scales)\n assert (\n (n_positions == 1 or n_positions == n_frames)\n and (n_rotations == 1 or n_rotations == n_frames)\n and (n_scales == 1 or n_scales == n_frames)\n ), (\n f\"Number of position\"\n f\"({n_positions}), rotation ({n_rotations}) and scale ({n_scales})\"\n \"frames must be 1 or match.\"\n )\n\n # Frames\n self._n_frames = n_frames\n self._current_frame_id = 0\n self.model_matrix = self.get_local_transform()\n self._enabled_frames = enabled_frames\n if self._enabled_frames is not None:\n assert np.count_nonzero(self._enabled_frames) == n_frames, (\n f\"Number of non-zero elements in enabled_frames\"\n f\" ({np.count_nonzero(self._enabled_frames)}) must match number of frames in sequence ({n_frames})\"\n )\n # Create an array that maps from the true frame id (counting also disabled frames) to the index of the\n # first existing frame in the sequence.\n self._enabled_frame_id = np.cumsum(self._enabled_frames) - 1\n\n # Stores the true frame id (counting also disabled frames) we use this to allow going\n # through both enabled and disabled frames from the GUI.\n self._internal_frame_id = 0\n\n # Material\n self.material = Material(color=color) if material is None else material\n\n # Renderable Attributes\n self.is_renderable = False\n self.backface_culling = True\n self.backface_fragmap = False\n self.draw_outline = False\n\n # Flags to enable rendering passes\n self.cast_shadow = False\n self.depth_prepass = False\n self.fragmap = False\n self.outline = False\n\n # Programs for render passes. 
Subclasses are responsible for setting these.\n self.depth_only_program = None # Required for depth_prepass and cast_shadow passes\n self.fragmap_program = None # Required for fragmap pass\n self.outline_program = None # Required for outline pass\n\n # GUI\n self.name = name if name is not None else type(self).__name__\n self.uid = C.next_gui_id()\n self.unique_name = self.name + \"{}\".format(self.uid)\n self.icon = icon if icon is not None else \"\\u0082\"\n self._enabled = True\n self._expanded = False\n self.gui_controls = {\n \"affine\": {\n \"fn\": self.gui_affine,\n \"icon\": \"\\u009b\",\n \"is_visible\": gui_affine,\n },\n \"material\": {\n \"fn\": self.gui_material,\n \"icon\": \"\\u0088\",\n \"is_visible\": gui_material,\n },\n \"animation\": {\n \"fn\": self.gui_animation,\n \"icon\": \"\\u0098\",\n \"is_visible\": (lambda: self._n_frames > 1)(),\n },\n \"io\": {\n \"fn\": self.gui_io,\n \"icon\": \"\\u009a\",\n \"is_visible\": (lambda: self.gui_io.__func__ is not Node.gui_io)(),\n },\n }\n self.gui_modes = {\"view\": {\"title\": \" View\", \"fn\": self.gui_mode_view, \"icon\": \"\\u0099\"}}\n self._selected_mode = \"view\"\n self._show_in_hierarchy = True\n self.is_selectable = is_selectable\n self.export_usd_enabled = True\n self.export_usd_expanded = True\n\n self.nodes: List[Node] = []\n self.parent: Node = None\n\n # Selected Mode\n @property\n def selected_mode(self):\n return self._selected_mode\n\n @selected_mode.setter\n def selected_mode(self, selected_mode):\n self._selected_mode = selected_mode\n\n # Transform\n @property\n def position(self):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n return self._positions[idx]\n\n @position.setter\n def position(self, position):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n self._positions[idx] = np.array(position, dtype=np.float32).copy()\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def positions(self):\n return self._positions\n\n @positions.setter\n def positions(self, positions):\n self._positions = positions\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def rotation(self):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n return self._rotations[idx]\n\n @rotation.setter\n def rotation(self, rotation):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n self._rotations[idx] = rotation\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def rotations(self):\n return self._rotations\n\n @rotations.setter\n def rotations(self, rotations):\n self._rotations = rotations\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scale(self):\n idx = self.current_frame_id if self._scales.shape[0] > 1 else 0\n return self._scales[idx]\n\n @scale.setter\n def scale(self, scale):\n idx = self.current_frame_id if self._scales.shape[0] > 1 else 0\n self._scales[idx] = scale\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scales(self):\n return self._scales\n\n @scales.setter\n def scales(self, scales):\n self._scales = scales\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @staticmethod\n @lru_cache()\n def _compute_transform(pos, rot, scale):\n rotation = np.eye(4)\n rotation[:3, :3] = np.array(rot)\n\n trans = np.eye(4)\n trans[:3, 3] = np.array(pos)\n\n 
scale = np.diag([scale, scale, scale, 1])\n\n return (trans @ rotation @ scale).astype(\"f4\")\n\n def get_local_transform(self):\n \"\"\"Construct local transform as a 4x4 matrix from this node's position, orientation and scale.\"\"\"\n return self._compute_transform(tuple(self.position), tuple(map(tuple, self.rotation)), self.scale)\n\n def update_transform(self, parent_transform=None):\n \"\"\"Update the model matrix of this node and all of its descendants.\"\"\"\n if parent_transform is None:\n self.model_matrix = self.get_local_transform()\n else:\n self.model_matrix = parent_transform.astype(\"f4\") @ self.get_local_transform()\n\n for n in self.nodes:\n n.update_transform(self.model_matrix)\n\n @property\n def color(self):\n return self.material.color\n\n @color.setter\n def color(self, color):\n self.material.color = color\n\n @property\n def bounds(self):\n \"\"\"The bounds in the format ((x_min, x_max), (y_min, y_max), (z_min, z_max))\"\"\"\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_bounds(self):\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_center(self):\n return self.current_bounds.mean(-1)\n\n @property\n def center(self):\n return self.bounds.mean(-1)\n\n def get_local_bounds(self, points):\n if len(points.shape) == 2 and points.shape[-1] == 3:\n points = points[np.newaxis]\n assert len(points.shape) == 3\n\n # Compute min and max coordinates of the bounding box ignoring NaNs.\n val = np.array(\n [\n [np.nanmin(points[:, :, 0]), np.nanmax(points[:, :, 0])],\n [np.nanmin(points[:, :, 1]), np.nanmax(points[:, :, 1])],\n [np.nanmin(points[:, :, 2]), np.nanmax(points[:, :, 2])],\n ]\n )\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n else:\n return val\n\n def get_bounds(self, points):\n val = self.get_local_bounds(points)\n\n # Transform bounding box with the model matrix.\n val = (self.model_matrix @ np.vstack((val, np.array([1.0, 1.0]))))[:3]\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n else:\n return val\n\n @property\n def n_frames(self):\n return self._n_frames\n\n @n_frames.setter\n def n_frames(self, n_frames):\n self._n_frames = n_frames\n\n def __len__(self):\n return self.n_frames\n\n @property\n def current_frame_id(self):\n return self._current_frame_id\n\n @current_frame_id.setter\n def current_frame_id(self, frame_id):\n # Check if the frame changed.\n last_frame_id = self._current_frame_id if self._enabled_frames is None else self._internal_frame_id\n if self.n_frames == 1 or frame_id == last_frame_id:\n return\n\n self.on_before_frame_update()\n if self._enabled_frames is None:\n if frame_id < 0:\n self._current_frame_id = 0\n elif frame_id >= len(self):\n self._current_frame_id = len(self) - 1\n else:\n self._current_frame_id = frame_id\n else:\n # If an enabled_frames is present use it to get the current frame.\n if frame_id < 0:\n self._internal_frame_id = 0\n elif frame_id >= self._enabled_frames.shape[0]:\n self._internal_frame_id = self._enabled_frames.shape[0] - 1\n else:\n self._internal_frame_id = frame_id\n self._current_frame_id = self._enabled_frame_id[self._internal_frame_id]\n # Update enabled using the mask.\n self.enabled = self._enabled_frames[self._internal_frame_id]\n\n # Update frame id of all children nodes.\n for n in self.nodes:\n n.current_frame_id = self._current_frame_id\n\n self.on_frame_update()\n if 
self.parent and (self._positions.shape[0] > 1 or self._rotations.shape[0] > 1 or self._scales.shape[0] > 1):\n self.update_transform(self.parent.model_matrix)\n\n def next_frame(self):\n self.current_frame_id = self.current_frame_id + 1 if self.current_frame_id < len(self) - 1 else 0\n\n def previous_frame(self):\n self.current_frame_id = self.current_frame_id - 1 if self.current_frame_id > 0 else len(self) - 1\n\n def on_before_frame_update(self):\n \"\"\"Called when the current frame is about to change, 'self.current_frame_id' still has the id of the\n previous frame.\"\"\"\n pass\n\n def on_frame_update(self):\n \"\"\"Called when the current frame is changed.\"\"\"\n pass\n\n def add(self, *nodes, **kwargs):\n self._add_nodes(*nodes, **kwargs)\n\n def _add_node(self, n: \"Node\", show_in_hierarchy=True, expanded=False, enabled=True):\n \"\"\"\n Add a single node\n :param show_in_hierarchy: Whether to show the node in the scene hierarchy.\n :param expanded: Whether the node is initially expanded in the GUI.\n \"\"\"\n if n is None:\n return\n n._show_in_hierarchy = show_in_hierarchy\n n._expanded = expanded\n n._enabled = enabled if n._enabled_frames is None else n._enabled_frames[n.current_frame_id]\n self.nodes.append(n)\n n.parent = self\n n.update_transform(self.model_matrix)\n\n def _add_nodes(self, *nodes, **kwargs):\n \"\"\"Add multiple nodes\"\"\"\n for n in nodes:\n self._add_node(n, **kwargs)\n\n def remove(self, *nodes):\n for n in nodes:\n n.release()\n try:\n self.nodes.remove(n)\n except:\n pass\n\n @property\n def show_in_hierarchy(self):\n return self._show_in_hierarchy\n\n @property\n def enabled(self):\n return self._enabled\n\n @enabled.setter\n def enabled(self, enabled):\n self._enabled = enabled\n\n @property\n def expanded(self):\n return self._expanded\n\n @expanded.setter\n def expanded(self, expanded):\n self._expanded = expanded\n\n def is_transparent(self):\n \"\"\"\n Returns true if the object is transparent and should thus be sorted when rendering.\n Subclassess that use a different color should implement this method to be rendered correctly when transparent.\n \"\"\"\n return self.material.color[3] < 1.0\n\n def gui(self, imgui):\n \"\"\"\n Render GUI for custom node properties and controls. 
Implementation optional.\n Elements rendered here will show up in the scene hierarchy\n :param imgui: imgui context.\n See https://pyimgui.readthedocs.io/en/latest/reference/imgui.core.html for available elements to render\n \"\"\"\n pass\n\n def gui_modes(self, imgui):\n \"\"\"Render GUI with toolbar (tools) for this particular node\"\"\"\n\n def gui_animation(self, imgui):\n \"\"\"Render GUI for animation related settings\"\"\"\n\n if self._enabled_frames is None:\n if self.n_frames > 1:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self.current_frame_id,\n min_value=0,\n max_value=self.n_frames - 1,\n )\n if u:\n self.current_frame_id = fid\n else:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self._internal_frame_id,\n min_value=0,\n max_value=self._enabled_frames.shape[0] - 1,\n )\n if u:\n self.current_frame_id = fid\n\n def gui_affine(self, imgui):\n \"\"\"Render GUI for affine transformations\"\"\"\n # Position controls\n up, pos = imgui.drag_float3(\n \"Position##pos{}\".format(self.unique_name),\n *self.position,\n 1e-2,\n format=\"%.2f\",\n )\n if up:\n self.position = pos\n\n # Rotation controls\n euler_angles = rot2euler_numpy(self.rotation[np.newaxis], degrees=True)[0]\n ur, euler_angles = imgui.drag_float3(\n \"Rotation##pos{}\".format(self.unique_name),\n *euler_angles,\n 1e-2,\n format=\"%.2f\",\n )\n if ur:\n self.rotation = euler2rot_numpy(np.array(euler_angles)[np.newaxis], degrees=True)[0]\n\n # Scale controls\n us, scale = imgui.drag_float(\n \"Scale##scale{}\".format(self.unique_name),\n self.scale,\n 1e-2,\n min_value=0.001,\n max_value=100.0,\n format=\"%.3f\",\n )\n if us:\n self.scale = scale\n\n def gui_material(self, imgui):\n \"\"\"Render GUI with material properties\"\"\"\n\n # Color Control\n uc, color = imgui.color_edit4(\"Color##color{}'\".format(self.unique_name), *self.material.color)\n if uc:\n self.color = color\n\n # Diffuse\n ud, diffuse = imgui.slider_float(\n \"Diffuse##diffuse{}\".format(self.unique_name),\n self.material.diffuse,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ud:\n self.material.diffuse = diffuse\n\n # Ambient\n ua, ambient = imgui.slider_float(\n \"Ambient##ambient{}\".format(self.unique_name),\n self.material.ambient,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ua:\n self.material.ambient = ambient\n\n def gui_io(self, imgui):\n \"\"\"Render GUI for import/export\"\"\"\n pass\n\n def gui_mode_view(self, imgui):\n \"\"\"Render custom GUI for view mode\"\"\"\n pass\n\n def gui_context_menu(self, imgui, x: int, y: int):\n _, self.enabled = imgui.checkbox(\"Enabled\", self.enabled)\n if any([n._show_in_hierarchy for n in self.nodes]):\n imgui.spacing()\n imgui.separator()\n imgui.spacing()\n for n in self.nodes:\n if not n._show_in_hierarchy:\n continue\n if imgui.begin_menu(f\"{n.name}##{n.uid}\"):\n n.gui_context_menu(imgui, x, y)\n imgui.end_menu()\n\n # Renderable\n @staticmethod\n def once(func):\n def _decorator(self, *args, **kwargs):\n if self.is_renderable:\n return\n else:\n func(self, *args, **kwargs)\n self.is_renderable = True\n\n return _decorator\n\n def make_renderable(self, ctx):\n \"\"\"\n Prepares this object for rendering. 
This function must be called before `render` is used.\n :param ctx: The moderngl context.\n \"\"\"\n pass\n\n def render(self, camera, position=None, rotation=None, **kwargs):\n \"\"\"Render the current frame in this sequence.\"\"\"\n pass\n\n def render_positions(self, prog):\n \"\"\"\n Render with a VAO with only positions bound, used for shadow mapping, fragmap and depth prepass.\n \"\"\"\n pass\n\n def redraw(self, **kwargs):\n \"\"\"Perform update and redraw operations. Push to the GPU when finished. Recursively redraw child nodes\"\"\"\n for n in self.nodes:\n n.redraw(**kwargs)\n\n def set_camera_matrices(self, prog, camera, **kwargs):\n \"\"\"Set the model view projection matrix in the given program.\"\"\"\n # Transpose because np is row-major but OpenGL expects column-major.\n prog[\"model_matrix\"].write(self.model_matrix.T.astype(\"f4\").tobytes())\n prog[\"view_projection_matrix\"].write(camera.get_view_projection_matrix().T.astype(\"f4\").tobytes())\n\n def receive_shadow(self, program, **kwargs):\n \"\"\"\n Call this function if the renderable is to receive shadows.\n :param program: The shader program that can shade with shadows.\n :param kwargs: The render kwargs.\n \"\"\"\n if kwargs.get(\"shadows_enabled\", False):\n lights = kwargs[\"lights\"]\n\n for i, light in enumerate(lights):\n if light.shadow_enabled and light.shadow_map:\n light_matrix = light.mvp() @ self.model_matrix\n program[f\"dirLights[{i}].matrix\"].write(light_matrix.T.tobytes())\n\n # Bind shadowmap to slot i + 1, we reserve slot 0 for the mesh texture\n # and use slots 1 to (#lights + 1) for shadow maps\n light.shadow_map.use(location=i + 1)\n\n # Set sampler uniforms\n uniform = program[f\"shadow_maps\"]\n uniform.value = 1 if uniform.array_length == 1 else [*range(1, len(lights) + 1)]\n\n def render_shadowmap(self, light_matrix):\n if not self.cast_shadow or self.depth_only_program is None or self.color[3] == 0.0:\n return\n\n prog = self.depth_only_program\n prog[\"model_matrix\"].write(self.model_matrix.T.tobytes())\n prog[\"view_projection_matrix\"].write(light_matrix.T.tobytes())\n\n self.render_positions(prog)\n\n def render_fragmap(self, ctx, camera, uid=None):\n if not self.fragmap or self.fragmap_program is None:\n return\n\n # Transpose because np is row-major but OpenGL expects column-major.\n prog = self.fragmap_program\n self.set_camera_matrices(prog, camera)\n\n # Render with the specified object uid, if None use the node uid instead.\n prog[\"obj_id\"] = uid or self.uid\n\n if self.backface_culling or self.backface_fragmap:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n\n # If backface_fragmap is enabled for this node only render backfaces\n if self.backface_fragmap:\n ctx.cull_face = \"front\"\n\n self.render_positions(prog)\n\n # Restore cull face to back\n if self.backface_fragmap:\n ctx.cull_face = \"back\"\n\n def render_depth_prepass(self, camera, **kwargs):\n if not self.depth_prepass or self.depth_only_program is None:\n return\n\n prog = self.depth_only_program\n self.set_camera_matrices(prog, camera)\n self.render_positions(prog)\n\n def render_outline(self, ctx, camera):\n if self.outline and self.outline_program is not None:\n prog = self.outline_program\n self.set_camera_matrices(prog, camera)\n\n if self.backface_culling:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n self.render_positions(prog)\n\n # Render children node recursively.\n for n in self.nodes:\n n.render_outline(ctx, camera)\n\n def release(self):\n 
\"\"\"\n Release all OpenGL resources used by this node and any of its children. Subclasses that instantiate OpenGL\n objects should implement this method with '@hooked' to avoid leaking resources.\n \"\"\"\n for n in self.nodes:\n n.release()\n\n def on_selection(self, node, instance_id, tri_id):\n \"\"\"\n Called when the node is selected\n\n :param node: the node which was clicked (can be None if the selection wasn't a mouse event)\n :param instance_id: the id of the instance that was clicked, 0 if the object is not instanced\n (can be None if the selection wasn't a mouse event)\n :param tri_id: the id of the triangle that was clicked from the 'node' mesh\n (can be None if the selection wasn't a mouse event)\n \"\"\"\n pass\n\n def key_event(self, key, wnd_keys):\n \"\"\"\n Handle shortcut key presses (if you are the selected object)\n \"\"\"\n pass\n\n def update_frames(self, *args, **kwargs):\n pass\n\n def add_frames(self, *args, **kwargs):\n pass\n\n def remove_frames(self, *args, **kwargs):\n pass\n\n def _export_usd_recursively(self, stage, usd_path, directory, verbose):\n if verbose:\n print(usd_path)\n for n in self.nodes:\n if n.export_usd_enabled:\n n.export_usd(stage, usd_path, directory, verbose)\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n \"\"\"\n Export the node into an USD file. Nodes that implement this method should use\n recursively call this for every children that should also be exported.\n\n :param stage: an object of type Usd.Stage into which to export the node\n :param usd_path: the path of the parent object in the USD file scene hierarchy.\n \"\"\"\n from pxr import Gf, UsdGeom\n\n usd_path = f\"{usd_path}/{self.name.replace(' ', '_')}_{self.uid:03}\"\n\n # Transform.\n xform = UsdGeom.Xform.Define(stage, usd_path)\n a_xform = xform.AddTransformOp()\n a_xform.Set(Gf.Matrix4d(self.get_local_transform().astype(np.float64).T))\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "hooked", "path": "aitviewer/utils/decorators.py", "snippet": "class hooked:\n def __init__(self, fn):\n self.fn = fn\n\n def __set_name__(self, owner, name):\n func = self.fn\n\n def _decorator(self, *args, **kwargs):\n super_obj = super(owner, self)\n super_fn = getattr(super_obj, func.__name__)\n super_fn(*args, **kwargs)\n return func(self, *args, **kwargs)\n\n setattr(owner, name, _decorator)\n\n def __call__(self):\n assert (\n False\n ), \"@hooked decorator object should never be called directly. This can happen if you apply this decorator to a function that is not a method.\"" } ]
import numpy as np
from skimage import measure

from aitviewer.renderables.bounding_boxes import BoundingBoxes
from aitviewer.renderables.lines import Lines
from aitviewer.renderables.meshes import Meshes
from aitviewer.scene.node import Node
from aitviewer.utils.decorators import hooked
19,115
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos


class SDF(Node):
    """
    Renderable that can be used to draw level sets of a dense SDF volume meshed using marching cubes.

    This renderable internally uses the marching cubes algorithm from skimage.
    For a faster marching cubes implementation see the Volume renderable.
    """

    def __init__(
        self,
        volume,
        size=(1, 1, 1),
        level=0.0,
        color=(0.7, 0.7, 0.7, 1.0),
        level_sets=None,
        level_set_colors=None,
        mc_step_size=1,
        **kwargs,
    ):
        """Initializer.

        :param volume: np array of shape (X, Y, Z) of signed distance values
        :param size: size of the volume in local units.
        :param level: the level set used for the main mesh.
        :param color: color of the main mesh.
        :param level_sets: a list or array of additional level set values to display.
        :param level_set_colors: a list or array of shape (L, 4) of the same length as the level_set parameter with colors to use for the additional level sets.
        :param mc_step_size: step size used for marching cubes.
        :param **kwargs: arguments forwarded to the Node constructor.
        """
        assert len(volume.shape) == 3 and len(size) == 3
        kwargs["gui_material"] = False
        super().__init__(**kwargs)

        self.volume = volume
        self.size = np.array((size), np.float32)

        # Mesh.
        verts, faces, normals, _ = measure.marching_cubes(
            volume, level, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size
        )
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos


class SDF(Node):
    """
    Renderable that can be used to draw level sets of a dense SDF volume meshed using marching cubes.

    This renderable internally uses the marching cubes algorithm from skimage.
    For a faster marching cubes implementation see the Volume renderable.
    """

    def __init__(
        self,
        volume,
        size=(1, 1, 1),
        level=0.0,
        color=(0.7, 0.7, 0.7, 1.0),
        level_sets=None,
        level_set_colors=None,
        mc_step_size=1,
        **kwargs,
    ):
        """Initializer.

        :param volume: np array of shape (X, Y, Z) of signed distance values
        :param size: size of the volume in local units.
        :param level: the level set used for the main mesh.
        :param color: color of the main mesh.
        :param level_sets: a list or array of additional level set values to display.
        :param level_set_colors: a list or array of shape (L, 4) of the same length as the level_set parameter with colors to use for the additional level sets.
        :param mc_step_size: step size used for marching cubes.
        :param **kwargs: arguments forwarded to the Node constructor.
        """
        assert len(volume.shape) == 3 and len(size) == 3
        kwargs["gui_material"] = False
        super().__init__(**kwargs)

        self.volume = volume
        self.size = np.array((size), np.float32)

        # Mesh.
        verts, faces, normals, _ = measure.marching_cubes(
            volume, level, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size
        )
self.mesh = Meshes(verts, faces, vertex_normals=-normals, color=color, name="Mesh")
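The cropped code above stops right before this target line, so the completion task for this record is to produce the Meshes(...) call from the marching-cubes outputs. As a minimal, hedged sketch of that flow (the sphere SDF volume, grid resolution, and final print call below are invented for illustration and are not part of the sample), the same measure.marching_cubes call can be exercised standalone with only numpy and skimage:

import numpy as np
from skimage import measure

# Hypothetical input: a signed distance field of a sphere of radius 0.3 sampled on a
# 64^3 grid inside a unit cube (these numbers are illustrative, not from the sample).
res = 64
size = np.array((1.0, 1.0, 1.0), np.float32)
grid = np.stack(np.meshgrid(*[np.linspace(0.0, 1.0, res)] * 3, indexing="ij"), axis=-1)
volume = np.linalg.norm(grid - 0.5, axis=-1) - 0.3

# Same call and spacing formula as the cropped code: spacing maps voxel indices to
# local units so the meshed level set spans the requested volume size.
verts, faces, normals, _ = measure.marching_cubes(
    volume, 0.0, spacing=size / (np.array(volume.shape) - 1.0), step_size=1
)

# The ground-truth next_line wraps these arrays in the Meshes renderable from the
# context snippet, negating the normals (vertex_normals=-normals), presumably to flip
# them outward for an SDF that is negative inside the surface.
print(verts.shape, faces.shape, normals.shape)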
2
2023-12-07 16:13:50+00:00
24k
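Taken together, the fields above form one next-line-prediction example: the context list supplies cross-file snippets, gold_snippet_index 2 matches the Meshes entry that the target line actually needs (index 0 is BoundingBoxes, 1 is Lines, 2 is Meshes), and next_line is the string expected after the cropped code. Below is a minimal, hedged sketch of how such a record might be consumed; the prompt layout, the exact-match metric, and the abbreviated record dict are assumptions for illustration, not part of the dataset or its official tooling.

def build_prompt(record: dict) -> str:
    # Hypothetical prompt layout: prepend the gold cross-file snippet, then the imports
    # and the in-file prefix that the model must continue.
    gold = record["context"][record["gold_snippet_index"]]["snippet"]
    return f"{gold}\n\n{record['import_statement']}\n\n{record['cropped_code']}"

def exact_match(prediction: str, record: dict) -> bool:
    # Score only the first predicted line against the ground-truth next_line,
    # ignoring surrounding whitespace.
    lines = prediction.strip().splitlines()
    return bool(lines) and lines[0].strip() == record["next_line"].strip()

# Abbreviated record using the values visible in this row ("..." stands in for the
# full field contents shown above).
record = {
    "context": [
        {"identifier": "BoundingBoxes", "snippet": "..."},
        {"identifier": "Lines", "snippet": "..."},
        {"identifier": "Meshes", "snippet": "..."},
    ],
    "gold_snippet_index": 2,
    "import_statement": "...",
    "cropped_code": "...",
    "next_line": 'self.mesh = Meshes(verts, faces, vertex_normals=-normals, color=color, name="Mesh")',
}
prediction = 'self.mesh = Meshes(verts, faces, vertex_normals=-normals, color=color, name="Mesh")'
print(exact_match(prediction, record))  # True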
nexB/dejacode
license_library/views.py
[ { "identifier": "add_client_data", "path": "dje/client_data.py", "snippet": "def add_client_data(request, **kwargs):\n \"\"\"\n Set values on the request, to be available in the JavaScript client data object.\n On the client side, the values are accessible through ``NEXB.client_data``.\n \"\"\"\n if not hasattr(request, 'client_data'):\n request.client_data = {}\n request.client_data.update(kwargs)" }, { "identifier": "urlize_target_blank", "path": "dje/templatetags/dje_tags.py", "snippet": "@register.filter(is_safe=True, needs_autoescape=True)\n@stringfilter\ndef urlize_target_blank(value, autoescape=True):\n \"\"\"\n Wrap `urlize` to inject the `target=\"_blank\"` attribute.\n Also adds support for `ftp://`.\n \"\"\"\n if value.startswith(\"ftp://\"):\n link = f'<a target=\"_blank\" href=\"{value}\" rel=\"noreferrer nofollow\">{value}</a>'\n else:\n link = _urlize(value, nofollow=True, autoescape=autoescape)\n link = link.replace(\"<a\", '<a target=\"_blank\"')\n return format_html(link)" }, { "identifier": "URN_HELP_TEXT", "path": "dje/urn_resolver.py", "snippet": "URN_HELP_TEXT = \"URN is a globally unique and universal way to reference data.\"" }, { "identifier": "get_help_text", "path": "dje/utils.py", "snippet": "def get_help_text(opts, field_name):\n \"\"\"\n Return a cleaned help_text for a given `field_name` using the Model\n Meta `opts`.\n Support both the model class or the meta class as input.\n \"\"\"\n if not isinstance(opts, Options) and issubclass(opts, models.Model):\n opts = opts._meta\n return opts.get_field(field_name).help_text" }, { "identifier": "AcceptAnonymousMixin", "path": "dje/views.py", "snippet": "class AcceptAnonymousMixin:\n \"\"\"\n View mixin which accept Anonymous Users if the ANONYMOUS_USERS_DATASPACE\n setting is enabled.\n \"\"\"\n\n @method_decorator(accept_anonymous)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)" }, { "identifier": "AdminLinksDropDownMixin", "path": "dje/views.py", "snippet": "class AdminLinksDropDownMixin:\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n opts = self.model._meta\n info = opts.app_label, opts.model_name\n\n context[\"changelist_url\"] = reverse(\"admin:{}_{}_changelist\".format(*info))\n context[\"addition_url\"] = reverse(\"admin:{}_{}_add\".format(*info))\n\n with suppress(NoReverseMatch):\n context[\"import_url\"] = reverse(\"admin:{}_{}_import\".format(*info))\n\n context[\"show_admin_links\"] = self.request.user.is_staff and (\n context[\"has_add_permission\"] or context[\"has_change_permission\"]\n )\n\n return context" }, { "identifier": "DataspacedFilterView", "path": "dje/views.py", "snippet": "class DataspacedFilterView(\n DataspaceScopeMixin,\n GetDataspaceMixin,\n HasPermissionMixin,\n TableHeaderMixin,\n PreviousNextPaginationMixin,\n FilterView,\n):\n template_name = \"object_list_base.html\"\n template_list_table = None\n paginate_by = settings.PAGINATE_BY or 100\n # Required if `show_previous_and_next_object_links` enabled on the\n # details view.\n put_results_in_session = False\n group_name_version = False\n strict = False\n\n def get_filterset_kwargs(self, filterset_class):\n \"\"\"\n Add the dataspace in the filterset kwargs.\n\n Deletes the page_kwarg from the data if present,\n so the current pagination value is not included in the filters.\n \"\"\"\n kwargs = super().get_filterset_kwargs(filterset_class)\n\n if self.page_kwarg in self.request.GET:\n data = self.request.GET.copy()\n del data[\"page\"]\n 
kwargs.update({\"data\": data})\n\n kwargs.update({\"dataspace\": self.dataspace})\n return kwargs\n\n def get_queryset(self):\n \"\"\"Scope the QuerySet with the request User Dataspace.\"\"\"\n return super().get_queryset().scope(self.dataspace)\n\n def get_extra_add_urls(self):\n extra_add_urls = []\n opts = self.model._meta\n\n with suppress(NoReverseMatch):\n import_url = reverse(f\"admin:{opts.app_label}_{opts.model_name}_import\")\n extra_add_urls.append((f\"Import {opts.verbose_name_plural}\", import_url))\n\n return extra_add_urls\n\n def get_context_data(self, **kwargs):\n context_data = super().get_context_data(**kwargs)\n\n if self.put_results_in_session:\n session_key = build_session_key(self.model._meta.verbose_name)\n object_ids = [int(obj.id) for obj in context_data[\"object_list\"]]\n self.request.session[session_key] = object_ids\n\n if self.group_name_version:\n if not self.request.GET.get(\"sort\", None):\n name_version_groups = group_by_name_version(context_data[\"object_list\"])\n else:\n name_version_groups = [[obj] for obj in context_data[\"object_list\"]]\n\n context_data.update(\n {\n \"name_version_groups\": name_version_groups,\n \"is_grouping_active\": bool(\n [1 for group in name_version_groups if len(group) > 1]\n ),\n }\n )\n\n opts = self.model._meta\n\n add_url = None\n with suppress(NoReverseMatch):\n add_url = reverse(f\"{opts.app_label}:{opts.model_name}_add\")\n\n context_data.update(\n {\n \"opts\": opts,\n \"add_url\": add_url,\n \"extra_add_urls\": self.get_extra_add_urls(),\n \"preserved_filters\": get_preserved_filters(self.request, self.model),\n # Required for compatibility with navbar_header.html\n \"search_query\": self.request.GET.get(\"q\", \"\"),\n \"template_list_table\": self.template_list_table,\n }\n )\n\n return context_data" }, { "identifier": "DataspaceScopeMixin", "path": "dje/views.py", "snippet": "class DataspaceScopeMixin:\n \"\"\"Mixin to scope the get_queryset() method to the current user dataspace.\"\"\"\n\n # Set to True to include the object form the reference dataspace in the QuerySet\n include_reference_dataspace = False\n\n def get_queryset(self):\n \"\"\"Return the `QuerySet` scoped to the current user dataspace.\"\"\"\n qs = super().get_queryset()\n dataspace = self.request.user.dataspace\n return qs.scope(dataspace, include_reference=self.include_reference_dataspace)" }, { "identifier": "Header", "path": "dje/views.py", "snippet": "MIME_TYPES = {\n \"xls\": \"application/vnd.ms-excel\",\n \"pdf\": \"application/pdf\",\n \"html\": \"text/html\",\n}\n EMPTY_VALUES = (\"\", None, []) # Used for conditions without absorbing `False` values.\n COPY_NB_OBJECT_LIMIT = 100\nclass AcceptAnonymousMixin:\nclass ReferenceDataspaceOnly(UserPassesTestMixin):\nclass IsStaffMixin(UserPassesTestMixin):\nclass IsSuperuserMixin(UserPassesTestMixin):\nclass HasPermissionMixin:\nclass AdminLinksDropDownMixin:\nclass GetDataspaceMixin:\nclass DataspaceScopeMixin:\nclass PreviousNextPaginationMixin:\nclass TableHeaderMixin:\nclass DataspacedFilterView(\n DataspaceScopeMixin,\n GetDataspaceMixin,\n HasPermissionMixin,\n TableHeaderMixin,\n PreviousNextPaginationMixin,\n FilterView,\n):\nclass DataspacedModelFormMixin:\nclass GetDataspacedObjectMixin:\nclass LicenseDataForBuilderMixin:\nclass DataspacedCreateView(\n LoginRequiredMixin,\n PermissionRequiredMixin,\n SuccessMessageMixin,\n DataspacedModelFormMixin,\n CreateView,\n):\nclass DataspacedUpdateView(\n LoginRequiredMixin,\n PermissionRequiredMixin,\n GetDataspacedObjectMixin,\n 
DataspacedModelFormMixin,\n UpdateView,\n):\nclass DataspacedDeleteView(\n LoginRequiredMixin,\n PermissionRequiredMixin,\n GetDataspacedObjectMixin,\n DataspacedModelFormMixin,\n DeleteView,\n):\nclass TabContentView(\n GetDataspacedObjectMixin,\n DataspaceScopeMixin,\n DetailView,\n):\nclass SendAboutFilesMixin:\nclass SendAboutFilesView(\n LoginRequiredMixin,\n DataspaceScopeMixin,\n GetDataspacedObjectMixin,\n SendAboutFilesMixin,\n BaseDetailView,\n):\nclass MultiSendAboutFilesView(\n LoginRequiredMixin,\n SendAboutFilesMixin,\n View,\n):\nclass TabSetMixin:\nclass ObjectDetailsView(\n DataspaceScopeMixin,\n HasPermissionMixin,\n GetDataspacedObjectMixin,\n TabSetMixin,\n DetailView,\n):\nclass HierarchyView(DataspaceScopeMixin, DetailView):\nclass DataspaceAwareRelatedLookup(RelatedLookup):\nclass DataspaceAwareAutocompleteLookup(AutocompleteLookup):\nclass GlobalSearchListView(AcceptAnonymousMixin, TemplateView):\nclass DownloadableMixin:\nclass BootstrapCSSMixin:\nclass ActivityLog(\n LoginRequiredMixin,\n BootstrapCSSMixin,\n DownloadableMixin,\n TemplateView,\n):\nclass AccountProfileView(\n LoginRequiredMixin,\n FormView,\n):\nclass APIWrapperPaginator(Paginator):\nclass APIWrapperListView(\n PreviousNextPaginationMixin,\n ListView,\n):\nclass NotificationsCountMixin:\nclass UnreadNotificationsList(\n NotificationsCountMixin,\n notifications_views.UnreadNotificationsList,\n):\nclass AllNotificationsList(\n NotificationsCountMixin,\n notifications_views.AllNotificationsList,\n):\nclass IntegrationsStatusView(\n LoginRequiredMixin,\n IsStaffMixin,\n TemplateView,\n):\nclass ExportSPDXDocumentView(\n LoginRequiredMixin,\n DataspaceScopeMixin,\n GetDataspacedObjectMixin,\n BaseDetailView,\n):\nclass ExportCycloneDXBOMView(\n LoginRequiredMixin,\n DataspaceScopeMixin,\n GetDataspacedObjectMixin,\n BaseDetailView,\n):\n def dispatch(self, *args, **kwargs):\n def test_func(self):\n def test_func(self):\n def test_func(self):\n def has_permission(self, action):\n def get_context_data(self, **kwargs):\n def get_context_data(self, **kwargs):\n def dispatch(self, request, *args, **kwargs):\n def get_dataspace(self):\n def get_context_data(self, **kwargs):\n def get_queryset(self):\n def get_previous_next(self, page_obj):\n def get_context_data(self, **kwargs):\n def get_table_headers(self):\n def get_context_data(self, **kwargs):\n def get_filterset_kwargs(self, filterset_class):\n def get_queryset(self):\n def get_extra_add_urls(self):\n def get_context_data(self, **kwargs):\n def get_form_kwargs(self):\n def get_context_data(self, **kwargs):\n def get_object(self, queryset=None):\n def get_context_data(self, **kwargs):\n def get_success_message(self, cleaned_data):\n def form_valid(self, form):\n def get_deletion_status(self):\n def get_context_data(self, **kwargs):\n def form_valid(self, form):\n def get_success_url(self):\n def get_filename(instance):\n def get_zipped_response(about_files, filename):\n def get(self, request, *args, **kwargs):\n def get(self, request):\ndef index_dispatch(request):\ndef home_view(request):\ndef urn_resolve_view(request, urn=None):\ndef build_session_key(model_name, prefix=\"\"):\ndef normalize_tab_fields(tab_context):\n def get_tabsets(self):\n def get_tab_fields(self, tab_fields):\n def get_owner_hierarchy(owner):\n def tab_owner(self):\n def tab_components(self):\n def tab_license(self):\n def tab_activity(self, exclude_product_context=False):\n def tab_external_references(self):\n def tab_history(self):\n def get_package_fields(self, package, 
essential_tab=False):\n def show_usage_policy(value):\n def normalized_tabsets(tabsets):\n def get_referer_link(self):\n def get_context_data(self, **kwargs):\ndef object_copy_view(request):\ndef dataspace_choice_for_compare_view(request):\ndef object_compare_view(request):\ndef clone_dataset_view(request, pk):\n def get_context_data(self, **kwargs):\n def get_queryset(self):\n def set_dataspace_scope(self, qs):\n def get_annotated_queryset(self, qs):\n def get_searched_queryset(self, qs):\n def get_queryset(self):\n def get_list_view_results(self, view_class, dataspace):\n def get_context_data(self, **kwargs):\n def get(self, request, *args, **kwargs):\n def get_format(self):\n def get_root_filename(self):\n def get_filename(self, format):\n def get_context_data(self, **kwargs):\n def render_to_response(self, context, **response_kwargs):\n def get_bootstrap_css_code(self):\n def get_context_data(self, **kwargs):\n def get_days(self):\n def get_history_entries(self, days):\n def get_object_or_repr(history_entry):\n def get_objects(self, days):\n def get_format(self):\n def get_root_filename(self):\n def get_context_data(self, **kwargs):\n def get_context_data(self, **kwargs):\n def get_form_kwargs(self):\n def form_valid(self, form):\n def post(self, request, *args, **kwargs):\ndef docs_models_view(request):\n def get_limited_fields(model):\ndef manage_copy_defaults_view(request, pk):\ndef manage_tab_permissions_view(request, pk):\n def page(self, number):\n def get_paginator(self, *args, **kwargs):\n def get_context_data(self, **kwargs):\n def get_context_data(self, **kwargs):\n def get_integration_status(self, integration_class):\n def get_context_data(self, **kwargs):\ndef get_spdx_extracted_licenses(spdx_packages):\n def get(self, request, *args, **kwargs):\n def get_spdx_document(instance, user):\n def get(self, request, *args, **kwargs):\n def get_cyclonedx_bom(instance, user):" }, { "identifier": "ObjectDetailsView", "path": "dje/views.py", "snippet": "class ObjectDetailsView(\n DataspaceScopeMixin,\n HasPermissionMixin,\n GetDataspacedObjectMixin,\n TabSetMixin,\n DetailView,\n):\n template_name = \"object_details_base.html\"\n # The following requires put_results_in_session = True on the list view.\n show_previous_and_next_object_links = False\n\n def get_referer_link(self):\n \"\"\"\n Look in the HTTP_REFERER to find the origin of the request.\n If the user is coming from a object details view, we add\n the referer object URL in the context to be displayed in the UI.\n \"\"\"\n resolver = get_referer_resolver(self.request)\n if resolver and \"details\" in resolver.url_name:\n try:\n verbose_name = resolver.func.view_class.model._meta.verbose_name\n except AttributeError:\n return\n referer_path = urlparse(self.request.META.get(\"HTTP_REFERER\")).path\n if referer_path != self.request.path:\n return f'<a href=\"{referer_path}\">Return to {verbose_name}</a>'\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Add the` previous` and `next` object in the context.\n Also adds the RequestTemplate if the workflow app is enabled and if the\n user is authenticated (not anonymous).\n \"\"\"\n context = super().get_context_data(**kwargs)\n opts = self.model._meta\n user = self.request.user\n is_reference_data = self.object.dataspace.is_reference\n\n # User needs to be authenticated to look into reference data\n if self.request.user.is_anonymous and is_reference_data and not self.is_user_dataspace:\n raise Http404\n\n viewname = f\"{opts.app_label}:{opts.model_name}_list\"\n view_args = 
[]\n if is_reference_data and not self.is_user_dataspace:\n view_args = [self.object.dataspace]\n list_url = reverse(viewname, args=view_args)\n\n copy_or_update_link = None\n if user.is_staff and context[\"has_change_permission\"]:\n reference = Dataspace.objects.get_reference()\n if not is_reference_data and self.is_user_dataspace:\n object_in_reference = get_object_in(self.object, reference)\n if object_in_reference:\n copy_or_update_link = {\n \"label\": \"Check for Updates\",\n \"url\": object_in_reference.get_compare_url(),\n }\n\n elif is_reference_data and not self.is_user_dataspace:\n if get_object_in(self.object, user.dataspace):\n copy_or_update_link = {\n \"label\": \"Check for Updates\",\n \"url\": self.object.get_compare_url(),\n }\n else:\n copy_or_update_link = {\n \"label\": \"Copy to my Dataspace\",\n \"url\": self.object.get_copy_url(),\n }\n\n context.update(\n {\n \"tabsets\": self.normalized_tabsets(self.get_tabsets()),\n \"verbose_name\": opts.verbose_name,\n \"verbose_name_plural\": opts.verbose_name_plural,\n \"is_reference_data\": is_reference_data,\n \"is_user_dataspace\": self.is_user_dataspace,\n \"show_licenses_policy\": self.show_licenses_policy,\n \"list_url\": list_url,\n \"opts\": opts, # Required for the preserved_filters\n \"preserved_filters\": get_preserved_filters(self.request, self.model),\n \"copy_or_update_link\": copy_or_update_link,\n \"referer_link\": self.get_referer_link(),\n }\n )\n\n if self.show_previous_and_next_object_links:\n session_key = build_session_key(opts.verbose_name)\n session_ids = self.request.session.get(session_key)\n if session_ids:\n previous_id, next_id = get_previous_next(session_ids, int(self.object.id))\n if previous_id:\n with suppress(ObjectDoesNotExist):\n previous_object = self.model.objects.get(id=previous_id)\n context[\"previous_object_url\"] = previous_object.get_absolute_url()\n if next_id:\n with suppress(ObjectDoesNotExist):\n next_object = self.model.objects.get(id=next_id)\n context[\"next_object_url\"] = next_object.get_absolute_url()\n\n if user.is_authenticated and self.is_user_dataspace:\n context[\"request_templates\"] = (\n RequestTemplate.objects.scope(self.object.dataspace)\n .actives()\n .filter(include_applies_to=True)\n .for_content_type(ContentType.objects.get_for_model(self.model))\n )\n\n return context" }, { "identifier": "TabField", "path": "dje/views.py", "snippet": "MIME_TYPES = {\n \"xls\": \"application/vnd.ms-excel\",\n \"pdf\": \"application/pdf\",\n \"html\": \"text/html\",\n}\n EMPTY_VALUES = (\"\", None, []) # Used for conditions without absorbing `False` values.\n COPY_NB_OBJECT_LIMIT = 100\nclass AcceptAnonymousMixin:\nclass ReferenceDataspaceOnly(UserPassesTestMixin):\nclass IsStaffMixin(UserPassesTestMixin):\nclass IsSuperuserMixin(UserPassesTestMixin):\nclass HasPermissionMixin:\nclass AdminLinksDropDownMixin:\nclass GetDataspaceMixin:\nclass DataspaceScopeMixin:\nclass PreviousNextPaginationMixin:\nclass TableHeaderMixin:\nclass DataspacedFilterView(\n DataspaceScopeMixin,\n GetDataspaceMixin,\n HasPermissionMixin,\n TableHeaderMixin,\n PreviousNextPaginationMixin,\n FilterView,\n):\nclass DataspacedModelFormMixin:\nclass GetDataspacedObjectMixin:\nclass LicenseDataForBuilderMixin:\nclass DataspacedCreateView(\n LoginRequiredMixin,\n PermissionRequiredMixin,\n SuccessMessageMixin,\n DataspacedModelFormMixin,\n CreateView,\n):\nclass DataspacedUpdateView(\n LoginRequiredMixin,\n PermissionRequiredMixin,\n GetDataspacedObjectMixin,\n DataspacedModelFormMixin,\n 
UpdateView,\n):\nclass DataspacedDeleteView(\n LoginRequiredMixin,\n PermissionRequiredMixin,\n GetDataspacedObjectMixin,\n DataspacedModelFormMixin,\n DeleteView,\n):\nclass TabContentView(\n GetDataspacedObjectMixin,\n DataspaceScopeMixin,\n DetailView,\n):\nclass SendAboutFilesMixin:\nclass SendAboutFilesView(\n LoginRequiredMixin,\n DataspaceScopeMixin,\n GetDataspacedObjectMixin,\n SendAboutFilesMixin,\n BaseDetailView,\n):\nclass MultiSendAboutFilesView(\n LoginRequiredMixin,\n SendAboutFilesMixin,\n View,\n):\nclass TabSetMixin:\nclass ObjectDetailsView(\n DataspaceScopeMixin,\n HasPermissionMixin,\n GetDataspacedObjectMixin,\n TabSetMixin,\n DetailView,\n):\nclass HierarchyView(DataspaceScopeMixin, DetailView):\nclass DataspaceAwareRelatedLookup(RelatedLookup):\nclass DataspaceAwareAutocompleteLookup(AutocompleteLookup):\nclass GlobalSearchListView(AcceptAnonymousMixin, TemplateView):\nclass DownloadableMixin:\nclass BootstrapCSSMixin:\nclass ActivityLog(\n LoginRequiredMixin,\n BootstrapCSSMixin,\n DownloadableMixin,\n TemplateView,\n):\nclass AccountProfileView(\n LoginRequiredMixin,\n FormView,\n):\nclass APIWrapperPaginator(Paginator):\nclass APIWrapperListView(\n PreviousNextPaginationMixin,\n ListView,\n):\nclass NotificationsCountMixin:\nclass UnreadNotificationsList(\n NotificationsCountMixin,\n notifications_views.UnreadNotificationsList,\n):\nclass AllNotificationsList(\n NotificationsCountMixin,\n notifications_views.AllNotificationsList,\n):\nclass IntegrationsStatusView(\n LoginRequiredMixin,\n IsStaffMixin,\n TemplateView,\n):\nclass ExportSPDXDocumentView(\n LoginRequiredMixin,\n DataspaceScopeMixin,\n GetDataspacedObjectMixin,\n BaseDetailView,\n):\nclass ExportCycloneDXBOMView(\n LoginRequiredMixin,\n DataspaceScopeMixin,\n GetDataspacedObjectMixin,\n BaseDetailView,\n):\n def dispatch(self, *args, **kwargs):\n def test_func(self):\n def test_func(self):\n def test_func(self):\n def has_permission(self, action):\n def get_context_data(self, **kwargs):\n def get_context_data(self, **kwargs):\n def dispatch(self, request, *args, **kwargs):\n def get_dataspace(self):\n def get_context_data(self, **kwargs):\n def get_queryset(self):\n def get_previous_next(self, page_obj):\n def get_context_data(self, **kwargs):\n def get_table_headers(self):\n def get_context_data(self, **kwargs):\n def get_filterset_kwargs(self, filterset_class):\n def get_queryset(self):\n def get_extra_add_urls(self):\n def get_context_data(self, **kwargs):\n def get_form_kwargs(self):\n def get_context_data(self, **kwargs):\n def get_object(self, queryset=None):\n def get_context_data(self, **kwargs):\n def get_success_message(self, cleaned_data):\n def form_valid(self, form):\n def get_deletion_status(self):\n def get_context_data(self, **kwargs):\n def form_valid(self, form):\n def get_success_url(self):\n def get_filename(instance):\n def get_zipped_response(about_files, filename):\n def get(self, request, *args, **kwargs):\n def get(self, request):\ndef index_dispatch(request):\ndef home_view(request):\ndef urn_resolve_view(request, urn=None):\ndef build_session_key(model_name, prefix=\"\"):\ndef normalize_tab_fields(tab_context):\n def get_tabsets(self):\n def get_tab_fields(self, tab_fields):\n def get_owner_hierarchy(owner):\n def tab_owner(self):\n def tab_components(self):\n def tab_license(self):\n def tab_activity(self, exclude_product_context=False):\n def tab_external_references(self):\n def tab_history(self):\n def get_package_fields(self, package, essential_tab=False):\n def 
show_usage_policy(value):\n def normalized_tabsets(tabsets):\n def get_referer_link(self):\n def get_context_data(self, **kwargs):\ndef object_copy_view(request):\ndef dataspace_choice_for_compare_view(request):\ndef object_compare_view(request):\ndef clone_dataset_view(request, pk):\n def get_context_data(self, **kwargs):\n def get_queryset(self):\n def set_dataspace_scope(self, qs):\n def get_annotated_queryset(self, qs):\n def get_searched_queryset(self, qs):\n def get_queryset(self):\n def get_list_view_results(self, view_class, dataspace):\n def get_context_data(self, **kwargs):\n def get(self, request, *args, **kwargs):\n def get_format(self):\n def get_root_filename(self):\n def get_filename(self, format):\n def get_context_data(self, **kwargs):\n def render_to_response(self, context, **response_kwargs):\n def get_bootstrap_css_code(self):\n def get_context_data(self, **kwargs):\n def get_days(self):\n def get_history_entries(self, days):\n def get_object_or_repr(history_entry):\n def get_objects(self, days):\n def get_format(self):\n def get_root_filename(self):\n def get_context_data(self, **kwargs):\n def get_context_data(self, **kwargs):\n def get_form_kwargs(self):\n def form_valid(self, form):\n def post(self, request, *args, **kwargs):\ndef docs_models_view(request):\n def get_limited_fields(model):\ndef manage_copy_defaults_view(request, pk):\ndef manage_tab_permissions_view(request, pk):\n def page(self, number):\n def get_paginator(self, *args, **kwargs):\n def get_context_data(self, **kwargs):\n def get_context_data(self, **kwargs):\n def get_integration_status(self, integration_class):\n def get_context_data(self, **kwargs):\ndef get_spdx_extracted_licenses(spdx_packages):\n def get(self, request, *args, **kwargs):\n def get_spdx_document(instance, user):\n def get(self, request, *args, **kwargs):\n def get_cyclonedx_bom(instance, user):" }, { "identifier": "LicenseFilterSet", "path": "license_library/filters.py", "snippet": "class LicenseFilterSet(DataspacedFilterSet):\n related_only = [\n \"category\",\n \"license_profile\",\n \"usage_policy\",\n ]\n q = MatchOrderedSearchFilter(\n label=_(\"Search\"),\n match_order_fields=[\"short_name\", \"key\", \"name\"],\n search_fields=[\n \"name\",\n \"short_name\",\n \"key\",\n \"keywords\",\n \"spdx_license_key\",\n \"owner__name\",\n \"owner__alias\",\n ],\n widget=forms.widgets.HiddenInput,\n )\n text_search = ProgressiveTextSearchFilter(\n label=_(\"License text search\"),\n search_fields=[\"full_text\"],\n widget=forms.widgets.HiddenInput,\n )\n sort = DefaultOrderingFilter(\n label=_(\"Sort\"),\n fields=[\n \"name\",\n \"category\",\n \"license_profile\",\n \"owner\",\n ],\n empty_label=\"Relevance\",\n )\n in_spdx_list = HasValueFilter(\n label=_(\"In SPDX list\"),\n field_name=\"spdx_license_key\",\n choices=(\n (\"yes\", _(\"In SPDX List\")),\n (\"no\", _(\"Not in SPDX List\")),\n ),\n widget=DropDownRightWidget,\n )\n category = django_filters.ModelMultipleChoiceFilter(\n label=_(\"Category\"),\n field_name=\"category__label\",\n to_field_name=\"label\",\n queryset=LicenseCategory.objects.all(),\n widget=BootstrapSelectMultipleWidget(\n search_placeholder=\"Search categories\",\n ),\n )\n license_profile = django_filters.ModelMultipleChoiceFilter(\n label=_(\"License profile\"),\n field_name=\"license_profile__name\",\n to_field_name=\"name\",\n queryset=LicenseProfile.objects.all(),\n widget=BootstrapSelectMultipleWidget(\n search_placeholder=\"Search license profiles\",\n ),\n )\n\n class Meta:\n model = License\n 
fields = [\n \"category\",\n \"category__license_type\",\n \"license_profile\",\n \"usage_policy\",\n \"in_spdx_list\",\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.filters[\"usage_policy\"].extra[\"to_field_name\"] = \"label\"\n self.filters[\"usage_policy\"].label = _(\"Policy\")\n self.filters[\"category__license_type\"].label = _(\"Type\")\n\n for filter_name in [\"category__license_type\", \"usage_policy\"]:\n self.filters[filter_name].extra[\"widget\"] = DropDownRightWidget()" }, { "identifier": "License", "path": "license_library/models.py", "snippet": "class License(\n LicenseSymbolMixin,\n ReferenceNotesMixin,\n UsagePolicyMixin,\n ExternalReferenceMixin,\n HistoryFieldsMixin,\n RequestMixin,\n DataspacedModel,\n):\n owner = models.ForeignKey(\n to=\"organization.Owner\",\n on_delete=models.PROTECT,\n help_text=_(\n \"An owner is an entity that is the original author or custodian of one or \"\n \"more software licenses, and which is responsible for the text of that license.\"\n ),\n )\n\n key = models.CharField(\n db_index=True,\n max_length=50,\n help_text=_(\"Unique key name of the license.\"),\n validators=[validate_slug_plus],\n )\n\n name = models.CharField(\n db_index=True,\n max_length=100,\n help_text=_(\"The full name of the license, as provided by the original authors.\"),\n )\n\n short_name = models.CharField(\n db_index=True,\n max_length=50,\n verbose_name=_(\"Short Name\"),\n help_text=_(\"Most commonly used name for the license, often abbreviated.\"),\n )\n\n keywords = models.CharField(\n db_index=True,\n max_length=500,\n blank=True,\n help_text=_(\n \"Keywords to associate with a license to ensure that the license will be \"\n \"found when a user searches on one or more of the keywords. Examples include \"\n \"alternative names for the license, or file/product names that are commonly \"\n \"associated with the license.\"\n ),\n )\n\n homepage_url = models.URLField(\n _(\"Homepage URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\"Homepage URL for the license.\"),\n )\n\n full_text = NoStripTextField(\n blank=True,\n help_text=_(\n \"The full text of the license. Note that actual usage of a license with \"\n \"software may include copyright statements and owner information.\"\n ),\n )\n\n standard_notice = NoStripTextField(\n blank=True,\n help_text=_(\"The standard notice text for this license if it exists.\"),\n )\n\n text_urls = models.TextField(\n _(\"Text URLs\"),\n blank=True,\n help_text=_(\n \"URLs to the text of the license (plain text or HTML) on the main site of \"\n \"this license.\"\n ),\n )\n\n faq_url = models.URLField(\n _(\"FAQ URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\"URL of a page with Frequently Asked Questions about this license.\"),\n )\n\n osi_url = models.URLField(\n _(\"OSI URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\"URL on the OSI website http://opensource.org for OSI-approved licenses.\"),\n )\n\n other_urls = models.TextField(\n _(\"Other URLs\"),\n blank=True,\n help_text=_(\n \"Other URLs that identify this license, such as URLs to this license in \"\n \"different open-source projects. Obsolete links may be kept here, as they \"\n \"may be useful for historical analysis purpose.\"\n ),\n )\n\n reviewed = models.BooleanField(\n default=False,\n help_text=_(\n \"True / False (yes/no) - regarding whether a system license definition has \"\n \"been reviewed by an administrator. 
Defaults to False.\"\n ),\n )\n\n publication_year = models.CharField(\n max_length=4,\n blank=True,\n help_text=_(\"Year this license was first published, in four-digits format.\"),\n )\n\n spdx_license_key = models.CharField(\n _(\"SPDX short identifier\"),\n db_index=True,\n blank=True,\n max_length=50,\n validators=[validate_spdx_license_key],\n help_text=_(\n \"Short identifier of the license as stated on each license detail page at \"\n \"https://spdx.org/licenses/ or a LicenseRef value that points to another \"\n \"license list.\"\n ),\n )\n\n category = models.ForeignKey(\n to=\"license_library.LicenseCategory\",\n null=True,\n blank=True,\n on_delete=models.PROTECT,\n help_text=_(\n \"A license category, identified by a code, provides a major grouping for \"\n \"licenses, generally describing the relationship between the licensor and \"\n \"licensee.\"\n ),\n )\n\n license_style = models.ForeignKey(\n to=\"license_library.LicenseStyle\",\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n help_text=_(\n \"A license style identifies a group of miscellaneous characteristics about a \"\n \"license, which may include a combination of restrictions about software \"\n \"modification and usage\"\n ),\n )\n\n license_profile = models.ForeignKey(\n to=\"license_library.LicenseProfile\",\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n verbose_name=_(\"License profile\"),\n help_text=format_lazy(\n \"{verbose_name}: a selection of license tags and their values, identified by a \"\n \"numeric code, in order to provide a convenient way to assign a set of tag values to \"\n \"a license. \"\n 'A \"Tag\" identifies a frequently encountered obligation, restriction, or other '\n \"notable characteristic of license terms. \"\n \"Note that individual tag value assignments may vary by license.\",\n verbose_name=_(\"License profile\"),\n ),\n )\n\n license_status = models.ForeignKey(\n to=\"license_library.LicenseStatus\",\n verbose_name=_(\"configuration status\"),\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n help_text=_(\n \"An organization can use the license status to communicate the current stage \"\n \"of the license configuration review process.\"\n ),\n )\n\n is_active = models.BooleanField(\n verbose_name=_(\"Is active\"),\n null=True,\n db_index=True,\n help_text=_(\n \"When set to True (Yes), this field indicates that a license definition in the \"\n \"library is currently in use (active). When set to False (No), this field indicates \"\n \"that a license is deprecated (inactive) and should not be used, and the license \"\n \"will not appear in the user views. When the field value is Unknown, the license \"\n \"will not appear in the user views, usually suggesting that the license has not \"\n \"yet been evaluated.\"\n ),\n )\n\n curation_level = models.PositiveSmallIntegerField(\n db_index=True,\n default=0,\n validators=[validators.MaxValueValidator(100)],\n help_text=_(\n \"A numeric value, from 0 to 100, that indicates the level of completeness of all the \"\n \"pertinent license data, as well as the state of that data being reviewed by a senior \"\n 'administrator. General Guidelines: \"10\" indicates basic data present. \"20\" indicates '\n 'Category and License Style assigned. \"30\" indicates all Obligation Tags are set. '\n '\"40\" indicates all License Tags are set. \"50\" indicates all previous conditions '\n \"plus URL fields set. 
Anything above that is at the discretion of a senior \"\n \"administrative reviewer.\"\n ),\n )\n\n admin_notes = models.TextField(\n blank=True,\n help_text=_(\n \"Internal notes for administrative use only, primarily intended to \"\n \"communicate special considerations about the interpretation of a license.\"\n ),\n )\n\n guidance = models.TextField(\n blank=True,\n help_text=format_lazy(\n \"Guidance notes maintained by an administrator to be communicated to the users who \"\n \"view the {license_app}, primarily intended to provide cautionary and/or policy \"\n \"information.\",\n license_app=_(\"License Library\"),\n ),\n )\n\n special_obligations = models.TextField(\n blank=True,\n help_text=format_lazy(\n \"A concise description, maintained by an administrator, of the obligations \"\n \"(or restrictions) mandated by the license which are not communicated by the \"\n \"standard tag assignments of {license_profile} associated with this License.\",\n license_profile=_(\"License profile\"),\n ),\n )\n\n tags = models.ManyToManyField(\n to=\"license_library.LicenseTag\",\n through=\"LicenseAssignedTag\",\n )\n\n is_component_license = models.BooleanField(\n default=False,\n db_index=True,\n help_text=_(\n \"When set to Yes, indicates that this license is assigned by a \"\n \"component-creator to one or more versions of a component, and is not \"\n \"generally used by other components.\"\n ),\n )\n\n is_exception = models.BooleanField(\n default=False,\n db_index=True,\n help_text=_(\n \"When set to Yes, indicates that this license is actually an \"\n \"exception applied to another license in order to modify \"\n \"specific conditions of that other license.\"\n ),\n )\n\n guidance_url = models.CharField(\n _(\"Guidance URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\n \"A URL to a page that documents your organization's policies and procedures \"\n \"that relate to the obligations and restrictions associated with this \"\n \"license or with similar licenses.\"\n ),\n )\n\n popularity = models.PositiveSmallIntegerField(\n db_index=True,\n default=0,\n help_text=_(\n \"A numeric value assigned to a license and maintained by a DejaCode \"\n \"administrator, that indicates the relative popularity of a license as used by \"\n \"public software projects. The value influences the default license ordering \"\n \"of the User License List, as well as the ordering of the suggested licenses \"\n \"presented as a dropdown list when you enter text in a DejaCode license \"\n \"expression field. 
Popularity values are originally provided in DejaCode \"\n \"Reference Data, but your administrator has the option to modify them for your \"\n \"dataspace.\"\n ),\n )\n\n language = models.CharField(\n max_length=10,\n choices=license_library_app.languages,\n blank=True,\n help_text=_(\"The language for this license, stored in standard language ID format.\"),\n )\n\n objects = DataspacedManager.from_queryset(LicenseQuerySet)()\n\n class Meta:\n # This is a special case for the unique_together, ie several entries\n # It's important that's the first entry is 'key' in this case as it is\n # used to Match a License inside a dataspace\n unique_together = (\n (\"dataspace\", \"key\"),\n (\"dataspace\", \"name\"),\n (\"dataspace\", \"short_name\"),\n (\"dataspace\", \"uuid\"),\n )\n ordering = [\"-popularity\", \"name\"]\n permissions = (\n (\"change_usage_policy_on_license\", \"Can change the usage_policy of license\"),\n )\n\n def __str__(self):\n return f\"{self.short_name} ({self.key})\"\n\n def clean(self, from_api=False):\n if self.is_active is False and self.spdx_license_key:\n raise ValidationError(\"A deprecated license must not have an SPDX license key.\")\n super().clean(from_api)\n\n def _get_unique_checks(self, exclude=None):\n \"\"\"\n Ensure SPDX license key are unique within a Dataspace.\n This is a soft-constraint, ie not enforced at the database level.\n The check on `spdx_license_key` is not included if the value is blank.\n \"\"\"\n unique_checks, date_checks = super()._get_unique_checks(exclude)\n\n if self.spdx_license_key:\n unique_together = (\"dataspace\", \"spdx_license_key\")\n unique_checks.append((self.__class__, unique_together))\n\n return unique_checks, date_checks\n\n @property\n def urn(self):\n return urn.build(\"license\", key=self.key)\n\n def get_url(self, name, params=None):\n if not params:\n params = [self.dataspace.name, self.key]\n return super().get_url(name, params)\n\n def get_absolute_url(self):\n return self.get_url(\"details\")\n\n @property\n def details_url(self):\n return self.get_absolute_url()\n\n def get_delete_url(self):\n return self.get_url(\"delete\")\n\n def get_download_text_url(self):\n return self.get_url(\"download_text\")\n\n def get_details_url_for_expression(self):\n return self.get_absolute_link(field_name=\"key\", title=self.short_name)\n\n @property\n def permission_protected_fields(self):\n return {\"usage_policy\": \"change_usage_policy_on_license\"}\n\n @property\n def case_insensitive_unique_on(self):\n return [\"name\", \"short_name\", \"key\"]\n\n def where_used(self, user):\n \"\"\"Callable for the reporting system.\"\"\"\n return (\n f\"Product {self.product_set.get_related_secured_queryset(user).count()}\\n\"\n f\"Component {self.component_set.count()}\\n\"\n f\"Subcomponent {self.subcomponent_set.count()}\\n\"\n f\"Package {self.package_set.count()}\\n\"\n f\"ProductComponent {self.productcomponent_set.count()}\\n\"\n f\"ProductPackage {self.productpackage_set.count()}\"\n )\n\n def get_license_tab_displayed_tags(self):\n \"\"\"\n Return a list of the assigned tags for the given License limiting\n the tags where the value is set to True.\n Tags that are not in a LicenseTagGroup are not included.\n\n Use `LicenseAssignedTag.prefetch_for_license_tab()` in prefect_related of the QuerySet.\n \"\"\"\n assigned_tag_qs = self.licenseassignedtag_set.filter(\n license_tag__licensetaggroupassignedtag__isnull=False\n ).order_by(\"license_tag__licensetaggroupassignedtag\")\n\n return [\n (assigned_tag.license_tag.label, 
assigned_tag.value, assigned_tag.license_tag.text)\n for assigned_tag in assigned_tag_qs\n # equivalent to \"filter(value=True)\" without triggering another Query\n if assigned_tag.value\n ]\n\n def get_tagset(self, include_unknown=False, include_no_group=False):\n \"\"\"\n Return a tagset for the given License.\n A \"tagset\" is a the collection of all the LicenseTags assigned to a\n License grouped by LicenseTagGroup and ordered by the Sequence.\n Groups are ordered by their sequence and tags are also ordered by\n their sequence inside a Group.\n LicenseAssignedTag with \"Unknown\" value can be included using the\n include_unknown parameter.\n Tag not assigned in a LicenseTagGroup can be included using the\n include_no_group parameter, an extra Group \"(No Group)\" will be added.\n \"tagset\" format is:\n OrderedDict(\n [('GroupName', [\n ('TagName', 'AssignedTagValue', 'TagText', Annotations),]\n )]\n )\n \"\"\"\n filters = {\"license\": self}\n if not include_unknown:\n filters[\"value__isnull\"] = False\n\n license_assigned_tags = (\n LicenseAssignedTag.objects.scope(self.dataspace)\n .filter(**filters)\n .select_related(\"license_tag\")\n .prefetch_related(\"licenseannotation_set\")\n )\n\n # Building a dictionary with the assigned tags of the current License\n license_tags_dict = {\n t.license_tag.label: (t.value, t.license_tag.text, t.licenseannotation_set.all())\n for t in license_assigned_tags\n }\n\n # Creating a 'tabset' dictionary ordered by Group and Tag sequence\n ordered_assigned_tags = (\n LicenseTagGroupAssignedTag.objects.scope(self.dataspace)\n .order_by(\"license_tag_group__seq\", \"seq\")\n .select_related(\"license_tag_group\", \"license_tag\")\n )\n\n # Using an OrderedDict to keep the QS ordering as we build the results\n license_tagset = OrderedDict()\n for assigned_tag in ordered_assigned_tags:\n label = assigned_tag.license_tag.label\n if label in license_tags_dict:\n # Using pop() to remove the entry from the dict, so we keep a\n # list of tags that are not assigned into a LicenseTagGroup\n value, text, annotations = license_tags_dict.pop(label)\n group_name = assigned_tag.license_tag_group.name\n license_tagset.setdefault(group_name, []).append([label, value, text, annotations])\n\n # If there is still entries in license_tags_dict, that mean those tags\n # are not assigned into a LicenseTagGroup, we are adding those in the\n # result if the include_no_group is True\n if include_no_group and license_tags_dict:\n leftover_tags = [[label] + list(values) for label, values in license_tags_dict.items()]\n license_tagset.update({\"(No Group)\": leftover_tags})\n\n return license_tagset\n\n def get_tag_labels(self):\n \"\"\"Return the labels of all the tags associated with this license.\"\"\"\n return self.tags.values_list(\"label\", flat=True)\n\n def get_tag_value_from_label(self, label):\n try:\n assigned_tag = LicenseAssignedTag.objects.get(license=self, license_tag__label=label)\n except (ObjectDoesNotExist, MultipleObjectsReturned):\n return \"\" # Empty string rather than Error when no value available\n return str(assigned_tag.value)\n\n def set_assigned_tags_from_license_profile(self):\n \"\"\"Update or create missing LicenseAssignedTag from the license_profile.\"\"\"\n if not self.license_profile:\n return\n\n for profile_assigned_tag in self.license_profile.licenseprofileassignedtag_set.all():\n LicenseAssignedTag.objects.update_or_create(\n license=self,\n license_tag=profile_assigned_tag.license_tag,\n dataspace=self.dataspace,\n defaults={\"value\": 
profile_assigned_tag.value},\n )\n\n @staticmethod\n def get_extra_relational_fields():\n return [\"annotations\", \"external_references\"]\n\n @property\n def scancode_url(self):\n return SCANCODE_LICENSE_URL.format(self.key)\n\n @property\n def licensedb_url(self):\n return SCANCODE_LICENSEDB_URL.format(self.key)\n\n @property\n def spdx_url(self):\n \"\"\"\n Return a URL to the https://spdx.org/licenses/ list using the short identifier.\n Return None for SPDX license key starting with \"LicenseRef-\" as those are not\n available in the SPDX list.\n \"\"\"\n if self.spdx_license_key and not self.spdx_license_key.startswith(\"LicenseRef-\"):\n return SPDX_LICENSE_URL.format(self.spdx_license_key)\n\n @property\n def spdx_link(self):\n \"\"\"\n Return a link base on the `spdx_url` value.\n Return the `spdx_license_key` when the URL is not available.\n \"\"\"\n spdx_url = self.spdx_url\n if spdx_url:\n return self.get_html_link(self.spdx_url, value=self.spdx_license_key, target=\"_blank\")\n return self.spdx_license_key\n\n @property\n def spdx_id(self):\n \"\"\"\n Return the `spdx_license_key` when available or a crafted LicenseRef using\n the license key.\n \"\"\"\n return self.spdx_license_key or f\"LicenseRef-dejacode-{self.key}\"\n\n def as_spdx(self):\n \"\"\"Return this License as an SPDX ExtractedLicensingInfo entry.\"\"\"\n return spdx.ExtractedLicensingInfo(\n license_id=self.spdx_id,\n extracted_text=self.full_text,\n name=self.name,\n see_alsos=self.get_all_urls(),\n )\n\n def get_all_urls(self):\n \"\"\"Return all URLs set in URL-based fields of this License instance.\"\"\"\n url_fields = [\n \"licensedb_url\",\n \"scancode_url\",\n \"homepage_url\",\n \"osi_url\",\n \"faq_url\",\n \"text_urls\",\n \"other_urls\",\n ]\n\n urls = []\n for url_field in url_fields:\n url_value = getattr(self, url_field)\n if url_value:\n urls.extend([url for url in url_value.split() if url])\n\n return sorted(set(urls))\n\n def has_tag_field_enabled(self, tag_field):\n # Make sure to include the following prefetch on the QuerySet:\n # prefetch_related('licenseassignedtag_set__license_tag')\n for assigned_tag in self.licenseassignedtag_set.all():\n if getattr(assigned_tag.license_tag, tag_field) and assigned_tag.value:\n return True\n return False\n\n @property\n def attribution_required(self):\n return self.has_tag_field_enabled(\"attribution_required\")\n\n @property\n def redistribution_required(self):\n return self.has_tag_field_enabled(\"redistribution_required\")\n\n @property\n def change_tracking_required(self):\n return self.has_tag_field_enabled(\"change_tracking_required\")\n\n @property\n def language_code(self):\n return self.language" }, { "identifier": "LicenseAssignedTag", "path": "license_library/models.py", "snippet": "class LicenseAssignedTag(DataspacedModel):\n license = models.ForeignKey(\n to=\"license_library.License\",\n on_delete=models.CASCADE,\n )\n\n license_tag = models.ForeignKey(\n to=\"license_library.LicenseTag\",\n on_delete=models.PROTECT,\n )\n\n value = models.BooleanField(\n null=True,\n help_text=\"Yes, No, Unknown\",\n )\n\n class Meta:\n ordering = [\"license\"]\n unique_together = ((\"license\", \"license_tag\"), (\"dataspace\", \"uuid\"))\n\n def __str__(self):\n return f\"{self.license_tag.label}: {self.value}\"\n\n def unique_filters_for(self, target):\n \"\"\"\n Return the unique filters data dict.\n Custom identifier for LicenseAssignedTag.\n Required as there is no unique_together other than the uuid.\n \"\"\"\n return {\n \"license__uuid\": 
self.license.uuid,\n \"license_tag__uuid\": self.license_tag.uuid,\n \"dataspace\": target,\n }\n\n @staticmethod\n def prefetch_for_license_tab():\n assigned_tags_qs = LicenseAssignedTag.objects.order_by(\n \"license_tag__licensetaggroupassignedtag\"\n ).select_related(\"license_tag\")\n return models.Prefetch(\"licenses__licenseassignedtag_set\", queryset=assigned_tags_qs)" }, { "identifier": "LicenseCategory", "path": "license_library/models.py", "snippet": "class LicenseCategory(DataspacedModel):\n label = models.CharField(\n max_length=50,\n help_text=_(\"The descriptive name of a License Category.\"),\n )\n\n text = models.TextField(\n help_text=_(\"Descriptive, explanatory text about a License Category.\"),\n )\n\n LICENSE_TYPES = (\n (\"Open Source\", \"Open Source\"),\n (\"Closed Source\", \"Closed Source\"),\n )\n\n license_type = models.CharField(\n max_length=100,\n null=True,\n blank=True,\n choices=LICENSE_TYPES,\n db_index=True,\n help_text=_(\n \"A License Type identifies the high level nature of a License \"\n \"Category: Open Source or Closed Source.\"\n ),\n )\n\n class Meta:\n unique_together = ((\"dataspace\", \"label\"), (\"dataspace\", \"uuid\"))\n ordering = [\"label\"]\n verbose_name_plural = _(\"license categories\")\n\n def __str__(self):\n return self.label" }, { "identifier": "LicenseProfile", "path": "license_library/models.py", "snippet": "class LicenseProfile(DataspacedModel):\n name = models.CharField(\n max_length=50,\n help_text=format_lazy(\n \"A descriptive name for the {verbose_name}.\",\n verbose_name=_(\"License profile\"),\n ),\n )\n\n tags = models.ManyToManyField(\n to=\"license_library.LicenseTag\",\n through=\"LicenseProfileAssignedTag\",\n )\n\n examples = models.TextField(\n blank=True,\n help_text=format_lazy(\n \"Free-form text to identify examples of licenses that illustrate this {verbose_name}.\",\n verbose_name=_(\"License profile\"),\n ),\n )\n\n notes = models.TextField(\n blank=True,\n help_text=format_lazy(\n \"Extended notes about a {verbose_name} (for example, to explain some kind of \"\n \"special obligation).\",\n verbose_name=_(\"License profile\"),\n ),\n )\n\n class Meta:\n unique_together = ((\"dataspace\", \"name\"), (\"dataspace\", \"uuid\"))\n ordering = [\"name\"]\n verbose_name = _(\"license profile\")\n\n def __str__(self):\n return self.name\n\n def get_assigned_tags_html(self):\n template = \"\"\"\n <div class=\"media\">\n <img alt=\"{}\" src=\"{}\">\n <p>{}</p>\n </div>\"\"\"\n\n tags = []\n for obj in self.licenseprofileassignedtag_set.all():\n img = static(\"img/icon-no-gray.png\")\n if bool(obj.value):\n img = static(\"img/icon-yes.png\")\n tags.append(template.format(obj.value, img, obj.license_tag.label))\n\n return format_html('<div class=\"assigned_tags\">{}</div>', mark_safe(\"\".join(tags)))\n\n get_assigned_tags_html.short_description = \"Assigned tags\"" }, { "identifier": "LicenseStyle", "path": "license_library/models.py", "snippet": "class LicenseStyle(DataspacedModel):\n name = models.CharField(\n max_length=50,\n help_text=_(\"A descriptive name for the License Style.\"),\n )\n\n notes = models.TextField(\n blank=True,\n help_text=_(\n \"Additional explanation of the License Style, such as the nature of any \"\n \"license choices.\"\n ),\n )\n\n class Meta:\n unique_together = ((\"dataspace\", \"name\"), (\"dataspace\", \"uuid\"))\n ordering = [\"name\"]\n\n def __str__(self):\n return self.name" }, { "identifier": "LicenseTag", "path": "license_library/models.py", "snippet": "class 
LicenseTag(DataspacedModel):\n label = models.CharField(\n max_length=50,\n help_text=_(\n \"Organization-defined Label to identify a Tag that can be applied to a \" \"Tag Group.\"\n ),\n )\n\n text = models.TextField(\n help_text=_(\n \"Text to describe a Tag that can be applied to a Tag Group by an Organization.\"\n ),\n )\n\n guidance = models.TextField(\n blank=True,\n help_text=_(\n \"Detailed description of the criteria for setting the Tag assigned value, \"\n \"including examples (snippets) of representative license text that supports \"\n \"the determination of the License Tag assigned value.\"\n ),\n )\n\n default_value = models.BooleanField(\n null=True,\n help_text=_(\"Yes, No, Unknown\"),\n )\n\n show_in_license_list_view = models.BooleanField(\n default=False,\n help_text=format_lazy(\n \"When true (checked), include this Tag (both label and value) in the {license_app} \"\n \"Viewer. Intended for the most critical Tags only, such as those associated with \"\n \"source code redistribution and patent impact.\",\n license_app=_(\"License Library\"),\n ),\n )\n\n attribution_required = models.BooleanField(\n default=False,\n help_text=_(\n \"When true (checked), a license with this Tag requires attribution in the source \"\n \"code or the documentation of the product where the licensed software is being used, \"\n \"or both.\"\n ),\n )\n\n redistribution_required = models.BooleanField(\n default=False,\n help_text=_(\n \"When true (checked), a license with this Tag requires the product documentation to \"\n \"include instructions regarding how to obtain source code for the licensed software, \"\n \"including any modifications to it.\"\n ),\n )\n\n change_tracking_required = models.BooleanField(\n default=False,\n help_text=_(\n \"When true (checked), a license with this Tag requires any modifications to licensed \"\n \"software to be documented in the source code, the associated product documentation, \"\n \"or both.\"\n ),\n )\n\n class Meta:\n unique_together = ((\"dataspace\", \"label\"), (\"dataspace\", \"uuid\"))\n ordering = [\"label\"]\n\n def __str__(self):\n return self.label\n\n def get_slug_label(self):\n return \"tag__\" + self.label.lower().replace(\" \", \"_\").replace(\"-\", \"_\")" }, { "identifier": "UsagePolicy", "path": "policy/models.py", "snippet": "class UsagePolicy(ColoredIconMixin, DataspacedModel):\n CONTENT_TYPES = (\n models.Q(app_label=\"component_catalog\", model=\"component\")\n | models.Q(app_label=\"component_catalog\", model=\"subcomponent\")\n | models.Q(app_label=\"component_catalog\", model=\"package\")\n | models.Q(app_label=\"license_library\", model=\"license\")\n )\n\n label = models.CharField(\n max_length=50,\n help_text=_(\n \"Label is the text that you want to present to application \"\n \"users to describe a specific Usage Policy as it applies \"\n \"to an application object.\"\n ),\n )\n\n guidelines = models.TextField(\n blank=True,\n help_text=_(\n \"Guidelines explain the organization definition of a usage \"\n \"policy (approval level) and can also provide detailed \"\n \"requirements for compliance.\"\n ),\n )\n\n content_type = models.ForeignKey(\n to=ContentType,\n on_delete=models.PROTECT,\n verbose_name=_(\"object type\"),\n limit_choices_to=CONTENT_TYPES,\n help_text=_(\n \"Object type identifies the application object (License, \"\n \"Component, Subcomponent relationship, Package) to which the \"\n \"Usage Policy will apply.\"\n ),\n )\n\n class Compliance(models.TextChoices):\n WARNING = \"warning\", _(\"Warning\")\n ERROR 
= \"error\", _(\"Error\")\n\n compliance_alert = models.CharField(\n max_length=20,\n blank=True,\n choices=Compliance.choices,\n help_text=_(\n \"Indicates how the usage of a DejaCode object (license, component, \"\n \"package, etc.) complies with organizational policy. \"\n 'Value choices include \"Pass\" (or empty, the default value), '\n '\"Warning\" (should be reviewed), and \"Error\" '\n \"(fails compliance policy guidelines).\"\n ),\n )\n\n associated_product_relation_status = models.ForeignKey(\n to=\"product_portfolio.ProductRelationStatus\",\n null=True,\n blank=True,\n on_delete=models.PROTECT,\n help_text=_(\n \"An associated product relation status enables you to specify the product \"\n \"relation status to use automatically when a component or package with an \"\n \"assigned usage policy is added to a product, overriding the general \"\n \"default defined in the product relation status table.\"\n ),\n )\n\n class Meta:\n unique_together = ((\"dataspace\", \"content_type\", \"label\"), (\"dataspace\", \"uuid\"))\n ordering = [\"content_type\", \"label\"]\n verbose_name_plural = _(\"usage policies\")\n\n def __str__(self):\n return self.label\n\n def str_with_content_type(self):\n return f\"{self.label} ({self.content_type.model})\"\n\n @classmethod\n def get_identifier_fields(cls):\n \"\"\"Hack required by the Component import.\"\"\"\n return [\"label\"]\n\n def get_object_set(self):\n \"\"\"Return the QuerySet of objects using this policy.\"\"\"\n return self.content_type.model_class().objects.filter(usage_policy=self)\n\n def get_associated_policy_to_model(self, model):\n with suppress(models.ObjectDoesNotExist, MultipleObjectsReturned):\n return self.to_policies.to_model(model).get().to_policy\n\n def as_dict(self):\n return {\n \"label\": self.label,\n \"color_code\": self.get_color_code(),\n \"icon\": self.icon,\n \"compliance_alert\": self.compliance_alert,\n }" } ]
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Prefetch
from django.http import FileResponse
from django.template.defaultfilters import force_escape
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView

from dje.client_data import add_client_data
from dje.templatetags.dje_tags import urlize_target_blank
from dje.urn_resolver import URN_HELP_TEXT
from dje.utils import get_help_text as ght
from dje.views import AcceptAnonymousMixin
from dje.views import AdminLinksDropDownMixin
from dje.views import DataspacedFilterView
from dje.views import DataspaceScopeMixin
from dje.views import Header
from dje.views import ObjectDetailsView
from dje.views import TabField
from license_library.filters import LicenseFilterSet
from license_library.models import License
from license_library.models import LicenseAssignedTag
from license_library.models import LicenseCategory
from license_library.models import LicenseProfile
from license_library.models import LicenseStyle
from license_library.models import LicenseTag
from policy.models import UsagePolicy
14,749
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# DejaCode is a trademark of nexB Inc.
# SPDX-License-Identifier: AGPL-3.0-only
# See https://github.com/nexB/dejacode for support or download.
# See https://aboutcode.org for more information about AboutCode FOSS projects.
#


def include_license_type(view_instance):
    return view_instance.dataspace.show_license_type_in_license_list_view


def include_license_profile(view_instance):
    return view_instance.dataspace.show_license_profile_in_license_list_view


def include_policy(view_instance):
    return view_instance.dataspace.show_usage_policy_in_user_views


LICENSE_NAME_HELP = _(
    "The full name of the license, as provided by the original authors, along with the "
    "commonly used license short name and the license key required by license expressions."
)


class LicenseListView(
    AcceptAnonymousMixin,
    AdminLinksDropDownMixin,
    DataspacedFilterView,
):
    model = License
    filterset_class = LicenseFilterSet
    template_name = "license_library/license_list.html"
    template_list_table = "license_library/includes/license_list_table.html"
    include_reference_dataspace = True
    put_results_in_session = True
    table_headers = (
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# DejaCode is a trademark of nexB Inc.
# SPDX-License-Identifier: AGPL-3.0-only
# See https://github.com/nexB/dejacode for support or download.
# See https://aboutcode.org for more information about AboutCode FOSS projects.
#


def include_license_type(view_instance):
    return view_instance.dataspace.show_license_type_in_license_list_view


def include_license_profile(view_instance):
    return view_instance.dataspace.show_license_profile_in_license_list_view


def include_policy(view_instance):
    return view_instance.dataspace.show_usage_policy_in_user_views


LICENSE_NAME_HELP = _(
    "The full name of the license, as provided by the original authors, along with the "
    "commonly used license short name and the license key required by license expressions."
)


class LicenseListView(
    AcceptAnonymousMixin,
    AdminLinksDropDownMixin,
    DataspacedFilterView,
):
    model = License
    filterset_class = LicenseFilterSet
    template_name = "license_library/license_list.html"
    template_list_table = "license_library/includes/license_list_table.html"
    include_reference_dataspace = True
    put_results_in_session = True
    table_headers = (
Header("name", _("License name"), help_text=LICENSE_NAME_HELP, filter="in_spdx_list"),
8
2023-12-07 16:57:42+00:00
24k
wusize/CLIM
src/open_clip/model.py
[ { "identifier": "HFTextEncoder", "path": "src/open_clip/hf_model.py", "snippet": "class HFTextEncoder(nn.Module):\n \"\"\"HuggingFace model adapter\"\"\"\n output_tokens: torch.jit.Final[bool]\n\n def __init__(\n self,\n model_name_or_path: str,\n output_dim: int,\n config: PretrainedConfig = None,\n pooler_type: str = None,\n proj: str = None,\n pretrained: bool = True,\n output_tokens: bool = False,\n ):\n super().__init__()\n self.output_tokens = output_tokens\n self.output_dim = output_dim\n\n # TODO: find better way to get this information\n uses_transformer_pooler = (pooler_type == \"cls_pooler\")\n\n if transformers is None:\n raise RuntimeError(\"Please `pip install transformers` to use pre-trained HuggingFace models\")\n if config is None:\n self.config = AutoConfig.from_pretrained(model_name_or_path)\n create_func, model_args = (AutoModel.from_pretrained, model_name_or_path) if pretrained else (\n AutoModel.from_config, self.config)\n # TODO: do all model configs have this attribute? PretrainedConfig does so yes??\n if hasattr(self.config, \"is_encoder_decoder\") and self.config.is_encoder_decoder:\n self.transformer = create_func(model_args)\n self.transformer = self.transformer.encoder\n else:\n self.transformer = create_func(model_args, add_pooling_layer=uses_transformer_pooler)\n else:\n self.config = config\n self.transformer = AutoModel.from_config(config)\n if pooler_type is None: # get default arch pooler\n pooler_type = (arch_dict[self.config.model_type][\"pooler\"])\n \n self.pooler = _POOLERS[pooler_type]()\n\n d_model = getattr(self.config, arch_dict[self.config.model_type][\"config_names\"][\"width\"])\n if (d_model == output_dim) and (proj is None): # do we always need a proj?\n self.proj = nn.Identity()\n elif proj == 'linear':\n self.proj = nn.Linear(d_model, output_dim, bias=False)\n elif proj == 'mlp':\n hidden_size = (d_model + output_dim) // 2\n self.proj = nn.Sequential(\n nn.Linear(d_model, hidden_size, bias=False),\n nn.GELU(),\n nn.Linear(hidden_size, output_dim, bias=False),\n )\n\n def forward(self, x: TensorType):\n attn_mask = (x != self.config.pad_token_id).long()\n out = self.transformer(input_ids=x, attention_mask=attn_mask)\n pooled_out = self.pooler(out, attn_mask)\n projected = self.proj(pooled_out)\n\n seq_len = out.last_hidden_state.shape[1]\n tokens = (\n out.last_hidden_state[:, torch.arange(seq_len) != self.pooler.cls_token_position, :] \n if type(self.pooler) == ClsPooler \n else out.last_hidden_state\n )\n \n if self.output_tokens:\n return projected, tokens\n return projected\n\n def lock(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):\n if not unlocked_layers: # full freezing\n for n, p in self.transformer.named_parameters():\n p.requires_grad = (not freeze_layer_norm) if \"LayerNorm\" in n.split(\".\") else False\n return\n\n encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer\n layer_list = getattr(encoder, arch_dict[self.config.model_type][\"config_names\"][\"layer_attr\"])\n print(f\"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model\")\n embeddings = getattr(\n self.transformer, arch_dict[self.config.model_type][\"config_names\"][\"token_embeddings_attr\"])\n modules = [embeddings, *layer_list][:-unlocked_layers]\n # freeze layers\n for module in modules:\n for n, p in module.named_parameters():\n p.requires_grad = (not freeze_layer_norm) if \"LayerNorm\" in n.split(\".\") else False\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, 
enable=True):\n self.transformer.gradient_checkpointing_enable()\n\n def init_parameters(self):\n pass" }, { "identifier": "ModifiedResNet", "path": "src/open_clip/modified_resnet.py", "snippet": "class ModifiedResNet(nn.Module):\n \"\"\"\n A ResNet class that is similar to torchvision's but contains the following changes:\n - There are now 3 \"stem\" convolutions as opposed to 1, with an average pool instead of a max pool.\n - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1\n - The final pooling layer is a QKV attention instead of an average pool\n \"\"\"\n\n def __init__(self, layers, output_dim, heads, image_size=224, width=64,\n freeze_output=True,\n freeze_all_bns=True):\n super().__init__()\n self.output_dim = output_dim\n self.image_size = image_size\n self.freeze_output = freeze_output\n self.freeze_all_bns = freeze_all_bns\n # the 3-layer stem\n self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(width // 2)\n self.act1 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(width // 2)\n self.act2 = nn.ReLU(inplace=True)\n self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)\n self.bn3 = nn.BatchNorm2d(width)\n self.act3 = nn.ReLU(inplace=True)\n self.avgpool = nn.AvgPool2d(2)\n\n # residual layers\n self._inplanes = width # this is a *mutable* variable used during construction\n self.layer1 = self._make_layer(width, layers[0])\n self.layer2 = self._make_layer(width * 2, layers[1], stride=2)\n self.layer3 = self._make_layer(width * 4, layers[2], stride=2)\n self.layer4 = self._make_layer(width * 8, layers[3], stride=2)\n\n embed_dim = width * 32 # the ResNet feature dimension\n self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim, freeze_output)\n self.attnpool_input_size = image_size // 32\n\n def _make_layer(self, planes, blocks, stride=1):\n layers = [Bottleneck(self._inplanes, planes, stride)]\n\n self._inplanes = planes * Bottleneck.expansion\n for _ in range(1, blocks):\n layers.append(Bottleneck(self._inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def lock(self, unlocked_groups=0, freeze_bn_stats=True):\n assert freeze_bn_stats\n def _lock(module):\n for param in module.parameters():\n param.requires_grad = False\n if freeze_bn_stats:\n freeze_batch_norm_2d(module)\n module.eval()\n\n freeze_at = 5 - unlocked_groups\n print(f'Freeze the resnet at {freeze_at}', flush=True)\n\n if freeze_at >= 1: # stem\n _lock(self.conv1)\n _lock(self.bn1)\n _lock(self.conv2)\n _lock(self.bn2)\n _lock(self.conv3)\n _lock(self.bn3)\n # each stage is a torch.nn.modules.container.Sequential\n for idx, stage in enumerate([self.layer1, self.layer2, self.layer3, self.layer4], start=2):\n if freeze_at >= idx:\n for block in stage.children(): # each block is a Bottleneck\n _lock(block)\n if self.freeze_all_bns:\n print(f'Freeze all bn layers', flush=True) # TODO: study if this is necessary\n freeze_batch_norm_2d(self)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n # FIXME support for non-transformer\n pass\n\n def stem(self, x):\n x = self.act1(self.bn1(self.conv1(x)))\n x = self.act2(self.bn2(self.conv2(x)))\n x = self.act3(self.bn3(self.conv3(x)))\n x = self.avgpool(x)\n return x\n\n def forward(self, x):\n with torch.no_grad():\n x = self.stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = 
self.layer3(x)\n x = self.layer4(x)\n x = self.attnpool(x)\n\n return x\n\n @staticmethod\n def _denormalize_boxes(normed_boxes, x):\n h, w = x.shape[-2:]\n denormed_boxes = []\n for boxes in normed_boxes:\n new_boxes = boxes.clone() # FIXME: do not change the value in normed_boxes!\n new_boxes[:, [0, 2]] *= w\n new_boxes[:, [1, 3]] *= h\n denormed_boxes.append(new_boxes)\n return denormed_boxes\n\n def extract_roi_features(self, x, normed_boxes, extract_type='v2'):\n if extract_type == 'v1':\n return self._extract_roi_features_v1(x, normed_boxes)\n else:\n assert extract_type == 'v2'\n return self._extract_roi_features_v2(x, normed_boxes)\n\n def mask_attn_pool(self, image, masks):\n return self.mask_pool(image, masks)\n\n def mask_pool(self, image, masks):\n x = self.stem(image)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n feature_map = self.attnpool.forward_dense(x)\n feature_map = F.normalize(feature_map, dim=1) # remember to normalize!\n\n feature_map = feature_map.flatten(-2, -1) # bs, c, h*w\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n feature_map = torch.repeat_interleave(\n feature_map, torch.tensor(num_masks_per_image, device=feature_map.device), dim=0)\n features = (feature_map * masks[:, None]).sum(-1) / (masks.sum(1, keepdim=True) + 1e-12)\n\n return features\n\n def _extract_roi_features_v1(self, x, normed_boxes, **kwargs):\n with torch.no_grad():\n x = self.stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.attnpool.forward_dense(x)\n x = F.normalize(x, dim=1) # remember to normalize!\n # TODO: debug\n roi_feats = roi_align(x, self._denormalize_boxes(normed_boxes, x),\n (1, 1), 1.0, -1, True)[:, :, 0, 0]\n return roi_feats\n\n def _extract_roi_features_v2(self, x, normed_boxes, **kwargs):\n with torch.no_grad():\n x = self.stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x) # only the last layer is finetuned in our implementation\n\n tar_size = self.attnpool_input_size\n # TODO: debug\n roi_feats = roi_align(x, self._denormalize_boxes(normed_boxes, x),\n (tar_size, tar_size), 1.0, -1, True)\n\n roi_feats = self.attnpool(roi_feats)\n\n return roi_feats\n\n def encode_dense(self, x, keep_shape=True):\n x = self.stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n feature_map = self.attnpool.forward_dense(x)\n feature_map = F.normalize(feature_map, dim=1) # remember to normalize!\n\n return feature_map" }, { "identifier": "TimmModel", "path": "src/open_clip/timm_model.py", "snippet": "class TimmModel(nn.Module):\n \"\"\" timm model adapter\n \"\"\"\n\n def __init__(\n self,\n model_name,\n embed_dim,\n image_size=224,\n pool='avg',\n proj='linear',\n proj_bias=False,\n drop=0.,\n drop_path=None,\n patch_drop=None,\n pretrained=False,\n ):\n super().__init__()\n if timm is None:\n raise RuntimeError(\"Please `pip install timm` to use timm models.\")\n self.image_size = to_2tuple(image_size)\n\n # setup kwargs that may not be common across all models\n timm_kwargs = {}\n if drop_path is not None:\n timm_kwargs['drop_path_rate'] = drop_path\n if patch_drop is not None:\n timm_kwargs['patch_drop_rate'] = patch_drop\n\n custom_pool = pool in ('abs_attn', 'rot_attn')\n if not proj and not custom_pool:\n # use network classifier head as projection if no proj specified and no custom pooling used\n self.trunk = 
timm.create_model(\n model_name,\n num_classes=embed_dim,\n global_pool=pool,\n pretrained=pretrained,\n **timm_kwargs,\n )\n prev_chs = embed_dim\n else:\n self.trunk = timm.create_model(\n model_name,\n pretrained=pretrained,\n **timm_kwargs,\n )\n feat_size = self.trunk.default_cfg.get('pool_size', None)\n feature_ndim = 1 if not feat_size else 2\n if custom_pool:\n assert feature_ndim == 2\n # if attn pooling used, remove both classifier and default pool\n self.trunk.reset_classifier(0, global_pool='')\n else:\n # reset global pool if pool config set, otherwise leave as network default\n reset_kwargs = dict(global_pool=pool) if pool else {}\n self.trunk.reset_classifier(0, **reset_kwargs)\n prev_chs = self.trunk.num_features\n\n head_layers = OrderedDict()\n\n # Add custom pooling to head\n if pool == 'abs_attn':\n head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)\n prev_chs = embed_dim\n elif pool == 'rot_attn':\n head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim)\n prev_chs = embed_dim\n\n # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used\n if proj == 'linear':\n head_layers['drop'] = nn.Dropout(drop)\n head_layers['proj'] = nn.Linear(prev_chs, embed_dim, bias=proj_bias)\n elif proj == 'mlp':\n head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=(drop, 0), bias=(True, proj_bias))\n else:\n assert not proj, f'Unknown projection type {proj}.'\n\n self.head = nn.Sequential(head_layers)\n\n def lock(self, unlocked_groups=0, freeze_bn_stats=False):\n \"\"\" lock modules\n Args:\n unlocked_groups (int): leave last n layer groups unlocked (default: 0)\n \"\"\"\n if not unlocked_groups:\n # lock full model\n for param in self.trunk.parameters():\n param.requires_grad = False\n if freeze_bn_stats:\n freeze_batch_norm_2d(self.trunk)\n else:\n # NOTE: partial freeze requires latest timm (master) branch and is subject to change\n try:\n # FIXME import here until API stable and in an official release\n from timm.models.helpers import group_parameters, group_modules\n except ImportError:\n raise RuntimeError(\n 'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`')\n matcher = self.trunk.group_matcher()\n gparams = group_parameters(self.trunk, matcher)\n max_layer_id = max(gparams.keys())\n max_layer_id = max_layer_id - unlocked_groups\n for group_idx in range(max_layer_id + 1):\n group = gparams[group_idx]\n for param in group:\n self.trunk.get_parameter(param).requires_grad = False\n if freeze_bn_stats:\n gmodules = group_modules(self.trunk, matcher, reverse=True)\n gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}\n freeze_batch_norm_2d(self.trunk, gmodules)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n try:\n self.trunk.set_grad_checkpointing(enable)\n except Exception as e:\n logging.warning('grad checkpointing not supported for this timm image tower, continuing without...')\n\n def forward(self, x):\n x = self.trunk(x)\n x = self.head(x)\n return x\n\n @staticmethod\n def _denormalize_boxes(normed_boxes, x):\n h, w = x.shape[-2:]\n denormed_boxes = []\n for boxes in normed_boxes:\n new_boxes = boxes.clone() # FIXME: do not change the value in normed_boxes!\n new_boxes[:, [0, 2]] *= w\n new_boxes[:, [1, 3]] *= h\n denormed_boxes.append(new_boxes)\n return denormed_boxes\n\n def _extract_roi_features_v1(self, x, normed_boxes, **kwargs):\n h, w = x.shape[-2:]\n x = 
self.trunk.forward_features(x)\n h_f, w_f = x.shape[-2:]\n tar_h = (self.image_size[0] * h_f) // h\n tar_w = (self.image_size[1] * w_f) // w\n x = roi_align(x, self._denormalize_boxes(normed_boxes, x), (tar_h, tar_w),\n 1.0, -1, True)\n\n x = self.trunk.forward_head(x)\n x = self.head(x)\n\n return x\n\n def encode_dense(self, x, **kwargs):\n x = self.trunk.forward_features(x)\n x = self.dense_trunk_head(x)\n x = self.head(x)\n x = x.permute(0, 3, 1, 2)\n\n return x\n\n def dense_trunk_head(self, x):\n x = self.trunk.head.norm(x)\n x = x.permute(0, 2, 3, 1)\n x = self.trunk.head.drop(x)\n # x = x.permute(0, 3, 1, 2)\n\n return x\n\n def mask_pool(self, image, masks):\n feature_map = self.encode_dense(image)\n feature_map = F.normalize(feature_map, dim=1) # remember to normalize!\n feature_map = feature_map.flatten(-2, -1) # bs, c, h*w\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n feature_map = torch.repeat_interleave(\n feature_map, torch.tensor(num_masks_per_image, device=feature_map.device), dim=0)\n features = (feature_map * masks[:, None]).sum(-1) / (masks.sum(1, keepdim=True) + 1e-12)\n\n return features\n\n def extract_roi_features(self, x, normed_boxes, extract_type='v1'):\n assert extract_type == \"v1\"\n if extract_type == 'v1':\n return self._extract_roi_features_v1(x, normed_boxes)\n else:\n assert extract_type == 'v2'\n return self._extract_roi_features_v2(x, normed_boxes)\n\n def _extract_roi_features_v2(self, x, normed_boxes, **kwargs):\n x = self.encode_dense(x)\n x = F.normalize(x, dim=1) # remember to normalize!\n\n roi_feats = roi_align(x, self._denormalize_boxes(normed_boxes, x), (1, 1),\n 1.0, -1, True)[..., 0, 0]\n return roi_feats\n\n def encode_rois_and_image(self, x, normed_boxes, **kwargs):\n h, w = x.shape[-2:]\n x = self.trunk.forward_features(x)\n h_f, w_f = x.shape[-2:]\n tar_h = (self.image_size[0] * h_f) // h\n tar_w = (self.image_size[1] * w_f) // w\n x_image = x\n x_rois = roi_align(x, self._denormalize_boxes(normed_boxes, x), (tar_h, tar_w),\n 1.0, -1, True)\n\n x_rois = self.trunk.forward_head(x_rois)\n x_rois = self.head(x_rois)\n x_rois = F.normalize(x_rois, dim=-1)\n\n x_image = self.trunk.forward_head(x_image)\n x_image = self.head(x_image)\n x_image = F.normalize(x_image, dim=-1)\n\n return x_rois, x_image" }, { "identifier": "LayerNormFp32", "path": "src/open_clip/transformer.py", "snippet": "class LayerNormFp32(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back).\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n x = F.layer_norm(x.to(torch.float32), self.normalized_shape, self.weight, self.bias, self.eps)\n return x.to(orig_type)" }, { "identifier": "LayerNorm", "path": "src/open_clip/transformer.py", "snippet": "class LayerNorm(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm (with cast back to input dtype).\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n return x.to(orig_type)" }, { "identifier": "QuickGELU", "path": "src/open_clip/transformer.py", "snippet": "class QuickGELU(nn.Module):\n # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory\n def forward(self, x: torch.Tensor):\n return x * torch.sigmoid(1.702 * x)" }, { "identifier": "Attention", "path": "src/open_clip/transformer.py", "snippet": "class Attention(nn.Module):\n def __init__(\n self,\n dim,\n 
num_heads=8,\n qkv_bias=True,\n scaled_cosine=False,\n scale_heads=False,\n logit_scale_max=math.log(1. / 0.01),\n attn_drop=0.,\n proj_drop=0.\n ):\n super().__init__()\n self.scaled_cosine = scaled_cosine\n self.scale_heads = scale_heads\n assert dim % num_heads == 0, 'dim should be divisible by num_heads'\n self.num_heads = num_heads\n self.head_dim = dim // num_heads\n self.scale = self.head_dim ** -0.5\n self.logit_scale_max = logit_scale_max\n\n # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original\n self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)\n if qkv_bias:\n self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))\n else:\n self.in_proj_bias = None\n\n if self.scaled_cosine:\n self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))\n else:\n self.logit_scale = None\n self.attn_drop = nn.Dropout(attn_drop)\n if self.scale_heads:\n self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))\n else:\n self.head_scale = None\n self.out_proj = nn.Linear(dim, dim)\n self.out_drop = nn.Dropout(proj_drop)\n\n def forward(self, x, attn_mask: Optional[torch.Tensor] = None):\n L, N, C = x.shape\n q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)\n q = q.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)\n k = k.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)\n v = v.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)\n\n if self.logit_scale is not None:\n attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))\n logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()\n attn = attn.view(N, self.num_heads, L, L) * logit_scale\n attn = attn.view(-1, L, L)\n else:\n q = q * self.scale\n attn = torch.bmm(q, k.transpose(-1, -2))\n\n if attn_mask is not None:\n if attn_mask.dtype == torch.bool:\n new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)\n new_attn_mask.masked_fill_(attn_mask, float(\"-inf\"))\n attn_mask = new_attn_mask\n attn += attn_mask\n\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = torch.bmm(attn, v)\n if self.head_scale is not None:\n x = x.view(N, self.num_heads, L, C) * self.head_scale\n x = x.view(-1, L, C)\n x = x.transpose(0, 1).reshape(L, N, C)\n x = self.out_proj(x)\n x = self.out_drop(x)\n return x" }, { "identifier": "VisionTransformer", "path": "src/open_clip/transformer.py", "snippet": "class VisionTransformer(nn.Module):\n output_tokens: torch.jit.Final[bool]\n\n def __init__(\n self,\n image_size: int,\n patch_size: int,\n width: int,\n layers: int,\n heads: int,\n mlp_ratio: float,\n ls_init_value: float = None,\n global_average_pool: bool = False,\n attentional_pool: bool = False,\n n_queries: int = 256,\n attn_pooler_heads: int = 8,\n output_dim: int = 512,\n patch_dropout: float = 0.,\n input_patchnorm: bool = False,\n act_layer: Callable = nn.GELU,\n norm_layer: Callable = LayerNorm,\n output_tokens: bool = False\n ):\n super().__init__()\n self.output_tokens = output_tokens\n image_height, image_width = self.image_size = to_2tuple(image_size)\n patch_height, patch_width = self.patch_size = to_2tuple(patch_size)\n self.grid_size = (image_height // patch_height, image_width // patch_width)\n self.output_dim = output_dim\n\n # whether to layernorm each patch, as done in dual patchnorm paper - https://arxiv.org/abs/2302.01327v1\n self.input_patchnorm = input_patchnorm\n assert not input_patchnorm\n if input_patchnorm:\n patch_input_dim = 
patch_height * patch_width * 3\n self.patchnorm_pre_ln = LayerNorm(patch_input_dim)\n self.conv1 = nn.Linear(patch_input_dim, width)\n else:\n self.patchnorm_pre_ln = nn.Identity()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n # class embeddings and positional embeddings\n scale = width ** -0.5\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\n self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width))\n\n # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn\n self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()\n\n self.ln_pre = norm_layer(width)\n self.transformer = Transformer(\n width,\n layers,\n heads,\n mlp_ratio,\n ls_init_value=ls_init_value,\n act_layer=act_layer,\n norm_layer=norm_layer,\n )\n self.num_heads = heads\n\n self.global_average_pool = global_average_pool\n if attentional_pool:\n self.attn_pool = AttentionalPooler(output_dim, width, n_head=attn_pooler_heads, n_queries=n_queries)\n self.ln_post = norm_layer(output_dim)\n self.proj = nn.Parameter(scale * torch.randn(output_dim, output_dim))\n else:\n self.attn_pool = None\n self.ln_post = norm_layer(width)\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.init_parameters()\n\n def lock(self, unlocked_groups=0, freeze_bn_stats=False):\n for param in self.parameters():\n param.requires_grad = False\n\n if unlocked_groups != 0:\n groups = [\n [\n self.conv1,\n self.class_embedding,\n self.ln_pre,\n ],\n self.positional_embedding,\n *self.transformer.resblocks[:-1],\n [\n self.transformer.resblocks[-1],\n # self.ln_post, # fix layer norm\n ],\n # self.proj, # fix output layers\n ]\n\n def _unlock(x):\n if isinstance(x, Sequence):\n for g in x:\n _unlock(g)\n else:\n if isinstance(x, torch.nn.Parameter):\n x.requires_grad = True\n else:\n for p in x.parameters():\n p.requires_grad = True\n\n _unlock(groups[-unlocked_groups:])\n\n def attention_lock(self, **kwargs):\n for name, params in self.named_parameters():\n params.requires_grad = True if \"attn\" in name or \"position\" in name else False\n\n def init_parameters(self):\n # FIXME OpenAI CLIP did not define an init for the VisualTransformer\n # TODO experiment if default PyTorch init, below, or alternate init is best.\n pass\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.transformer.grad_checkpointing = enable\n\n def _global_pool(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n if self.global_average_pool:\n return x.mean(dim=1), x\n else:\n return x[:, 0], x[:, 1:]\n\n def forward(self, x: torch.Tensor):\n\n # to patches - whether to use dual patchnorm - https://arxiv.org/abs/2302.01327v1\n # if self.input_patchnorm:\n # # einops - rearrange(x, 'b c (h p1) (w p2) -> b (h w) (c p1 p2)')\n # x = x.reshape(x.shape[0], x.shape[1], self.grid_size[0], self.patch_size[0], self.grid_size[1], self.patch_size[1])\n # x = x.permute(0, 2, 4, 1, 3, 5)\n # x = x.reshape(x.shape[0], self.grid_size[0] * self.grid_size[1], -1)\n # x = self.patchnorm_pre_ln(x)\n # x = self.conv1(x)\n # else:\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n # class embeddings and positional embeddings\n x = torch.cat(\n 
[self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n # TODO: Allow interpolating the positional embeddings\n\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n pooled, tokens = self._global_pool(x)\n else:\n pooled, tokens = self._global_pool(x)\n pooled = self.ln_post(pooled)\n\n if self.proj is not None:\n pooled = pooled @ self.proj\n\n if self.output_tokens:\n return pooled, tokens\n \n return pooled\n\n def post_attention(self, x):\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n pooled, tokens = self._global_pool(x)\n else:\n pooled, tokens = self._global_pool(x)\n pooled = self.ln_post(pooled)\n\n if self.proj is not None:\n pooled = pooled @ self.proj\n\n if self.output_tokens:\n return pooled, tokens\n\n return pooled\n\n def extract_roi_features(self, x, normed_boxes, extract_type='v2'):\n if extract_type == 'v1':\n return self._extract_roi_features_v1(x, normed_boxes)\n elif extract_type == 'v2':\n return self._extract_roi_features_v2(x, normed_boxes)\n else:\n raise NotImplementedError\n # assert extract_type == 'v3'\n # return self._extract_roi_features_v3(x, normed_boxes)\n\n def mask_pool(self, x, masks):\n feature_map = self.encode_dense(x)\n feature_map = F.normalize(feature_map, dim=-1)\n\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n feature_map = torch.repeat_interleave(\n feature_map, torch.tensor(num_masks_per_image, device=feature_map.device), dim=0)\n features = (feature_map * masks.unsqueeze(-1)).sum(1) / (masks.sum(1, keepdim=True) + 1e-12)\n\n return features\n\n def mask_features(self, x, masks):\n feature_map = self.encode_dense(x)\n feature_map = F.normalize(feature_map, dim=-1)\n\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).flatten(-2, -1) > 0 # bs, h*w\n feature_map = torch.repeat_interleave(\n feature_map, torch.tensor(num_masks_per_image, device=feature_map.device), dim=0)\n\n mask_features = [f[m] for m, f in zip(masks, feature_map)]\n\n return mask_features\n\n def encode_dense(self, x, keep_shape=False):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n # assert h == w # TODO: support input of any shape, need to change the normed boxes to real boxes\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat(\n [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n # a patch_dropout of 0. 
would mean it is disabled and this function would do nothing but return what was passed in\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer.extract_feature_map(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n _, tokens = self._global_pool(x)\n else:\n _, tokens = self._global_pool(x)\n tokens = self.ln_post(tokens)\n\n if self.proj is not None:\n tokens = tokens @ self.proj\n\n feature_map = tokens.view(bs, h * w, -1) # .permute(0, 3, 1, 2)\n feature_map = F.normalize(feature_map, dim=-1) # normalize at the last dimension\n if keep_shape:\n feature_map = feature_map.view(bs, h, w, -1).permute(0, 3, 1, 2)\n return feature_map\n\n def mask_crop(self, x, masks):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).to(x) # bs, h, w\n x = torch.repeat_interleave(\n x, torch.tensor(num_masks_per_image, device=x.device), dim=0)\n x = x * masks[:, None]\n bs, _, h, w = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n # class embeddings and positional embeddings\n x = torch.cat(\n [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n # TODO: Allow interpolating the positional embeddings\n\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n pooled, tokens = self._global_pool(x)\n else:\n pooled, tokens = self._global_pool(x)\n pooled = self.ln_post(pooled)\n\n if self.proj is not None:\n pooled = pooled @ self.proj\n\n return pooled\n\n @staticmethod\n def _generate_masks_per_image(normed_boxes, mask_h, mask_w):\n num_boxes = len(normed_boxes)\n boxes = normed_boxes * torch.tensor(\n [[mask_w, mask_h, mask_w, mask_h]], device=normed_boxes.device)\n masks = torch.zeros(num_boxes, mask_h, mask_w,\n dtype=torch.bool, device=normed_boxes.device)\n for i, box in enumerate(boxes):\n x0, y0, x1, y1 = box.long().tolist()\n masks[i, y0:y1, x0:x1] = True\n\n return masks\n \n @staticmethod\n def _denormalize_boxes(normed_boxes, x):\n h, w = x.shape[-2:]\n denormed_boxes = []\n for boxes in normed_boxes:\n new_boxes = boxes.clone() # FIXME: do not change the value in normed_boxes!\n new_boxes[:, [0, 2]] *= w\n new_boxes[:, [1, 3]] *= h\n denormed_boxes.append(new_boxes)\n return denormed_boxes\n\n def _extract_roi_features_v1(self, x, normed_boxes):\n # used masks\n bs, _, h, w = x.shape\n patch_height, patch_width = self.patch_size\n mask_h, mask_w = h // patch_height, w // patch_width\n masks = [self._generate_masks_per_image(normed_boxes_, mask_h, mask_w)\n for normed_boxes_ in normed_boxes]\n\n return self.mask_attn_pool(x, masks)\n\n def _extract_roi_features_v3(self, x, normed_boxes): # v3 for extract two types\n # used masks\n bs, _, h, w = x.shape\n patch_height, patch_width = self.patch_size\n mask_h, mask_w = h // patch_height, w // patch_width\n masks = [self._generate_masks_per_image(normed_boxes_, mask_h, 
mask_w)\n for normed_boxes_ in normed_boxes]\n\n roi_features_v1, dense_x = self.mask_attn_pool(x, masks, return_dense=True)\n dense_x = F.normalize(dense_x, dim=-1) # normalize along last dimension\n dense_x = dense_x.permute(0, 3, 1, 2)\n roi_features_v2 = roi_align(dense_x, self._denormalize_boxes(normed_boxes, dense_x), \n (1, 1), 1.0, -1, True)[..., 0, 0]\n\n return roi_features_v1, roi_features_v2\n\n def _extract_roi_features_v2(self, x, normed_boxes):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n # assert h == w # TODO: support input of any shape, need to change the normed boxes to real boxes\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat(\n [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer.extract_feature_map(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n _, tokens = self._global_pool(x)\n else:\n _, tokens = self._global_pool(x)\n tokens = self.ln_post(tokens)\n\n if self.proj is not None:\n tokens = tokens @ self.proj\n tokens = F.normalize(tokens, dim=-1) # normalize along last dimension\n tokens = tokens.view(bs, h, w, -1).permute(0, 3, 1, 2)\n return roi_align(tokens, self._denormalize_boxes(normed_boxes, tokens),\n (1, 1), 1.0, -1, True)[..., 0, 0]\n\n def rescale_positional_embedding(self, out_size, dtype):\n h, w = out_size\n rescaled_positional_embedding = \\\n self.positional_embedding.new_zeros(1 + h*w, self.positional_embedding.shape[1])\n rescaled_positional_embedding[0] = self.positional_embedding[0]\n pe_2d = self.positional_embedding[1:].T.contiguous().view(\n 1, -1, *self.grid_size)\n pe_2d = F.interpolate(pe_2d, out_size, mode='bicubic', align_corners=False).view(-1, h*w)\n rescaled_positional_embedding[1:] = pe_2d.T.contiguous()\n\n return rescaled_positional_embedding.to(dtype=dtype)\n\n def _mask_attn_pool(self, x: torch.Tensor, attn_mask: torch.Tensor, num_mask_tokens: int, return_dense=False):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat(\n [\n self.class_embedding.to(x.dtype)\n + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x,\n ],\n dim=1,\n ) # shape = [*, grid ** 2 + 1, width]\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n cls_embed = x[0:1]\n cls_embed = cls_embed.expand(num_mask_tokens, -1, -1)\n x = torch.cat([cls_embed, x], dim=0)\n if return_dense:\n x, x_dense = self.transformer.forward_image_dense(x, attn_mask)\n x_dense = x_dense.permute(1, 0, 2) # LND -> NLD\n x_dense = x_dense[:, num_mask_tokens + 1:]\n\n x_dense = self.ln_post(x_dense)\n\n if 
self.proj is not None:\n x_dense = x_dense @ self.proj\n x_dense = F.normalize(x_dense, dim=-1) # normalize along last dimension\n x_dense = x_dense.view(bs, h, w, -1)\n else:\n x = self.transformer(x, attn_mask)\n x_dense = None\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # [N, L, D]\n x = self.ln_post(x[:, :num_mask_tokens, :])\n\n if self.proj is not None:\n x = torch.einsum(\"nld,dc->nlc\", x, self.proj)\n\n return x, x_dense\n\n def mask_attn_pool(self, image, masks, return_dense=False):\n assert hasattr(self, \"positional_embedding\")\n batch_size = image.shape[0]\n assert batch_size == len(masks)\n num_masks_per_image = [mask.shape[0] for mask in masks]\n num_queries = max(num_masks_per_image)\n mask_h, mask_w = masks[0].shape[1:]\n\n batch_masks = torch.ones(batch_size, num_queries, mask_h, mask_w, dtype=torch.bool).to(image.device)\n for batch_id, mask in enumerate(masks):\n batch_masks[batch_id, :mask.shape[0]] = mask\n\n mask_token_attn_mask = torch.logical_not(batch_masks)\n # [B, Q, H//P x W//P]\n mask_token_attn_mask = mask_token_attn_mask.reshape(batch_size, num_queries, -1)\n\n num_mask_token = num_queries\n num_image_cls_token = (mask_h * mask_w + 1)\n num_image_token = num_image_cls_token - 1\n num_all_token = num_mask_token + num_image_cls_token\n\n # we start with no mask out\n attn_mask = torch.zeros(\n (num_all_token, num_all_token), dtype=torch.bool, device=image.device\n )\n\n # mask+cls+image token to mask token attention is masked out\n attn_mask[:, :num_mask_token] = True\n\n attn_mask = attn_mask.unsqueeze(0).repeat_interleave(batch_size, dim=0)\n attn_mask[:, :num_mask_token, -num_image_token:] = mask_token_attn_mask\n num_heads = self.num_heads # head width 64\n attn_mask = attn_mask.unsqueeze(1).expand(-1, num_heads, -1, -1)\n attn_mask = attn_mask.reshape(batch_size * num_heads, num_all_token, num_all_token)\n\n batch_mask_features, x_dense = self._mask_attn_pool(image, attn_mask, num_mask_token,\n return_dense=return_dense)\n\n mask_features = [batch_mask_features[batch_id, :num_masks]\n for batch_id, num_masks, in enumerate(num_masks_per_image)]\n if return_dense:\n # x_dense = F.normalize(x_dense, dim=-1).flatten(1, 2) # bs, h*w, c\n # masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n # x_dense = torch.repeat_interleave(\n # x_dense, torch.tensor(num_masks_per_image, device=x_dense.device), dim=0)\n # x_dense = (x_dense * masks.unsqueeze(-1)).sum(1) / masks.sum(1, keepdim=True)\n\n return torch.cat(mask_features), x_dense\n else:\n return torch.cat(mask_features)\n\n def encode_rois_and_image(self, x, normed_boxes):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n # assert h == w # TODO: support input of any shape, need to change the normed boxes to real boxes\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat(\n [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n # a patch_dropout of 0. 
would mean it is disabled and this function would do nothing but return what was passed in\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x, x_image = self.transformer.extract_feature_map(x, return_forward=True)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n _, tokens = self._global_pool(x)\n else:\n _, tokens = self._global_pool(x)\n tokens = self.ln_post(tokens)\n\n if self.proj is not None:\n tokens = tokens @ self.proj\n\n feature_map = tokens.view(bs, h * w, -1) # .permute(0, 3, 1, 2)\n feature_map = F.normalize(feature_map, dim=-1)\n feature_map = feature_map.view(bs, h, w, -1).permute(0, 3, 1, 2)\n x_rois = roi_align(feature_map, self._denormalize_boxes(normed_boxes, feature_map),\n (1, 1), 1.0, -1, True)[..., 0, 0]\n x_rois = F.normalize(x_rois, dim=-1)\n\n x_image = self.post_attention(x_image)\n x_image = F.normalize(x_image, dim=-1)\n\n return x_rois, x_image" }, { "identifier": "TextTransformer", "path": "src/open_clip/transformer.py", "snippet": "class TextTransformer(nn.Module):\n output_tokens: torch.jit.Final[bool]\n\n def __init__(\n self,\n context_length: int = 77,\n vocab_size: int = 49408,\n width: int = 512,\n heads: int = 8,\n layers: int = 12,\n ls_init_value: float = None,\n output_dim: int = 512,\n act_layer: Callable = nn.GELU,\n norm_layer: Callable = LayerNorm,\n embed_cls: bool = False,\n pad_id: int = 0,\n output_tokens: bool = False,\n ):\n super().__init__()\n self.output_tokens = output_tokens\n self.num_pos = self.context_length = context_length\n self.vocab_size = vocab_size\n self.width = width\n self.output_dim = output_dim\n self.heads = heads\n self.pad_id = pad_id\n\n self.text_projection = nn.Parameter(torch.empty(width, output_dim))\n\n if embed_cls:\n self.cls_emb = nn.Parameter(torch.empty(width))\n self.num_pos += 1\n else:\n self.cls_emb = None\n\n self.token_embedding = nn.Embedding(vocab_size, width)\n self.positional_embedding = nn.Parameter(torch.empty(self.num_pos, width))\n self.transformer = Transformer(\n width=width,\n layers=layers,\n heads=heads,\n ls_init_value=ls_init_value,\n act_layer=act_layer,\n norm_layer=norm_layer,\n )\n self.ln_final = norm_layer(width)\n\n self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)\n\n self.init_parameters()\n\n def init_parameters(self):\n nn.init.normal_(self.token_embedding.weight, std=0.02)\n nn.init.normal_(self.positional_embedding, std=0.01)\n if self.cls_emb is not None:\n nn.init.normal_(self.cls_emb, std=0.01)\n\n proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)\n attn_std = self.transformer.width ** -0.5\n fc_std = (2 * self.transformer.width) ** -0.5\n for block in self.transformer.resblocks:\n nn.init.normal_(block.attn.in_proj_weight, std=attn_std)\n nn.init.normal_(block.attn.out_proj.weight, std=proj_std)\n nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)\n nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)\n\n if self.text_projection is not None:\n nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)\n\n def lock(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):\n assert unlocked_layers == 0 and freeze_layer_norm\n print(f'Freeze the text encoder', flush=True)\n for p in self.parameters():\n p.requires_grad = False\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.transformer.grad_checkpointing = enable\n\n def 
build_attention_mask(self):\n # lazily create causal attention mask, with full attention between the tokens\n # pytorch uses additive attention mask; fill with -inf\n mask = torch.empty(self.num_pos, self.num_pos)\n mask.fill_(float(\"-inf\"))\n mask.triu_(1) # zero out the lower diagonal\n return mask\n\n def build_cls_mask(self, text, cast_dtype: torch.dtype):\n cls_mask = (text != self.pad_id).unsqueeze(1)\n cls_mask = F.pad(cls_mask, (1, 0, cls_mask.shape[2], 0), value=1.0)\n additive_mask = torch.empty(cls_mask.shape, dtype=cast_dtype, device=cls_mask.device)\n additive_mask.fill_(0)\n additive_mask.masked_fill_(~cls_mask, float(\"-inf\"))\n additive_mask = torch.repeat_interleave(additive_mask, self.heads, 0)\n return additive_mask\n\n def _repeat(self, t, N: int):\n return t.reshape(1, 1, -1).repeat(N, 1, 1)\n\n def forward(self, text):\n cast_dtype = self.transformer.get_cast_dtype()\n seq_len = text.shape[1]\n\n x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]\n attn_mask = self.attn_mask\n if self.cls_emb is not None:\n seq_len += 1\n x = torch.cat([x, self._repeat(self.cls_emb, x.shape[0])], dim=1)\n cls_mask = self.build_cls_mask(text, cast_dtype)\n attn_mask = attn_mask[None, :seq_len, :seq_len] + cls_mask[:, :seq_len, :seq_len]\n\n x = x + self.positional_embedding[:seq_len].to(cast_dtype)\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x, attn_mask=attn_mask)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # x.shape = [batch_size, n_ctx, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n if self.cls_emb is not None:\n pooled, tokens = x[:, -1], x[:, :-1]\n pooled = self.ln_final(pooled)\n else:\n x = self.ln_final(x)\n pooled, tokens = x[torch.arange(x.shape[0]), text.argmax(dim=-1)], x\n\n if self.text_projection is not None:\n pooled = pooled @ self.text_projection\n\n if self.output_tokens:\n return pooled, tokens\n\n return pooled" }, { "identifier": "to_2tuple", "path": "src/open_clip/utils.py", "snippet": "def freeze_batch_norm_2d(module, module_match={}, name=''):\ndef _ntuple(n):\n def parse(x):" } ]
from dataclasses import dataclass
from typing import Optional, Tuple, Union
from torch import nn
from torch.utils.checkpoint import checkpoint
from .hf_model import HFTextEncoder
from .modified_resnet import ModifiedResNet
from .timm_model import TimmModel
from .transformer import LayerNormFp32, LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer
from .utils import to_2tuple
import logging
import math
import numpy as np
import torch
import torch.nn.functional as F
15,630
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 ls_init_value: Optional[float] = None # layer scale initial value patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results input_patchnorm: bool = False # whether to use dual patchnorm - would only apply the input layernorm on each patch, as post-layernorm already exist in original clip vit design global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580) attentional_pool: bool = False # whether to use attentional pooler in the last embedding layer n_queries: int = 256 # n_queries for attentional pooler attn_pooler_heads: int = 8 # n heads for attentional_pooling timm_model_name: str = None # a valid model name overrides layers, width, patch_size timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') timm_proj_bias: bool = False # enable bias final projection timm_drop: float = 0. # head dropout timm_drop_path: Optional[float] = None # backbone stochastic depth output_tokens: bool = False freeze_output = True freeze_all_bns = True @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' embed_cls: bool = False pad_id: int = 0 output_tokens: bool = False def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name:
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 ls_init_value: Optional[float] = None # layer scale initial value patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results input_patchnorm: bool = False # whether to use dual patchnorm - would only apply the input layernorm on each patch, as post-layernorm already exist in original clip vit design global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580) attentional_pool: bool = False # whether to use attentional pooler in the last embedding layer n_queries: int = 256 # n_queries for attentional pooler attn_pooler_heads: int = 8 # n heads for attentional_pooling timm_model_name: str = None # a valid model name overrides layers, width, patch_size timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') timm_proj_bias: bool = False # enable bias final projection timm_drop: float = 0. # head dropout timm_drop_path: Optional[float] = None # backbone stochastic depth output_tokens: bool = False freeze_output = True freeze_all_bns = True @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' embed_cls: bool = False pad_id: int = 0 output_tokens: bool = False def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name:
visual = TimmModel(
2
2023-12-09 05:43:08+00:00
24k
LkPrtctrd/BSL-V53
Heart/Logic/LogicLaserMessageFactory.py
[ { "identifier": "ClientHelloMessage", "path": "Heart/Packets/Client/Authentification/ClientHelloMessage.py", "snippet": "class ClientHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"Protocol\"] = self.readInt()\n fields[\"KeyVersion\"] = self.readInt()\n fields[\"MajorVersion\"] = self.readInt()\n fields[\"MinorVersion\"] = self.readInt()\n fields[\"Build\"] = self.readInt()\n fields[\"ContentHash\"] = self.readString()\n fields[\"DeviceType\"] = self.readInt()\n fields[\"AppStore\"] = self.readInt()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(20100, fields, cryptoInit)\n\n def getMessageType(self):\n return 10100\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginMessage", "path": "Heart/Packets/Client/Authentification/LoginMessage.py", "snippet": "class LoginMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"AccountID\"] = self.readLong()\n fields[\"PassToken\"] = self.readString()\n fields[\"ClientMajor\"] = self.readInt()\n fields[\"ClientMinor\"] = self.readInt()\n fields[\"ClientBuild\"] = self.readInt()\n fields[\"ResourceSha\"] = self.readString()\n fields[\"Device\"] = self.readString()\n fields[\"PreferredLanguage\"] = self.readDataReference()\n fields[\"PreferredDeviceLanguage\"] = self.readString()\n fields[\"OSVersion\"] = self.readString()\n fields[\"isAndroid\"] = self.readBoolean()\n fields[\"IMEI\"] = self.readString()\n fields[\"AndroidID\"] = self.readString()\n fields[\"isAdvertisingEnabled\"] = self.readBoolean()\n fields[\"AppleIFV\"] = self.readString()\n fields[\"RndKey\"] = self.readInt()\n fields[\"AppStore\"] = self.readVInt()\n fields[\"ClientVersion\"] = self.readString()\n fields[\"TencentOpenId\"] = self.readString()\n fields[\"TencentToken\"] = self.readString()\n fields[\"TencentPlatform\"] = self.readVInt()\n fields[\"DeviceVerifierResponse\"] = self.readString()\n fields[\"AppLicensingSignature\"] = self.readString()\n fields[\"DeviceVerifierResponse\"] = self.readString()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n if fields[\"ClientMajor\"]==53:\n calling_instance.player.ClientVersion = f'{str(fields[\"ClientMajor\"])}.{str(fields[\"ClientBuild\"])}.{str(fields[\"ClientMinor\"])}'\n fields[\"Socket\"] = calling_instance.client\n db_instance = DatabaseHandler()\n if db_instance.playerExist(fields[\"PassToken\"], fields[\"AccountID\"]):\n player_data = json.loads(db_instance.getPlayerEntry(fields[\"AccountID\"])[2])\n db_instance.loadAccount(calling_instance.player, fields[\"AccountID\"])\n else:\n db_instance.createAccount(calling_instance.player.getDataTemplate(fields[\"AccountID\"][0], fields[\"AccountID\"][1], fields[\"PassToken\"]))\n ClientsManager.AddPlayer(calling_instance.player.ID, calling_instance.client)\n Messaging.sendMessage(20104, fields, cryptoInit, calling_instance.player)\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n Messaging.sendMessage(24399, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 10101\n\n def getMessageVersion(self):\n 
return self.messageVersion" }, { "identifier": "AskForBattleEndMessage", "path": "Heart/Packets/Client/Battle/AskForBattleEndMessage.py", "snippet": "class AskForBattleEndMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"Unk1\"] = self.readVInt()\n fields[\"Result\"] = self.readVInt()\n fields[\"Rank\"] = self.readVInt()\n fields[\"MapID\"] = self.readDataReference()\n fields[\"HeroesCount\"] = self.readVInt()\n fields[\"Heroes\"] = []\n for i in range(fields[\"HeroesCount\"]): fields[\"Heroes\"].append({\"Brawler\": {\"ID\": self.readDataReference(), \"SkinID\": self.readDataReference()}, \"Team\": self.readVInt(), \"IsPlayer\": self.readBoolean(), \"PlayerName\": self.readString()})\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(23456, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14110\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "ChangeAvatarNameMessage", "path": "Heart/Packets/Client/Home/ChangeAvatarNameMessage.py", "snippet": "class ChangeAvatarNameMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeString(fields[\"Name\"])\n self.writeBoolean(fields[\"NameSetByUser\"])\n\n def decode(self):\n fields = {}\n fields[\"Name\"] = self.readString()\n fields[\"NameSetByUser\"] = self.readBoolean()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n db_instance = DatabaseHandler()\n playerData = db_instance.getPlayer(calling_instance.player.ID)\n playerData[\"Name\"] = fields[\"Name\"]\n playerData[\"Registered\"] = True\n db_instance.updatePlayerData(playerData, calling_instance)\n fields[\"Socket\"] = calling_instance.client\n fields[\"Command\"] = {\"ID\": 201}\n Messaging.sendMessage(24111, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 10212\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "EndClientTurnMessage", "path": "Heart/Packets/Client/Home/EndClientTurnMessage.py", "snippet": "class EndClientTurnMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n fields[\"Tick\"] = self.readVInt()\n fields[\"Checksum\"] = self.readVInt()\n fields[\"CommandsCount\"] = self.readVInt()\n super().decode(fields)\n fields[\"Commands\"] = []\n for i in range(fields[\"CommandsCount\"]):\n fields[\"Commands\"].append({\"ID\": self.readVInt()})\n if LogicCommandManager.commandExist(fields[\"Commands\"][i][\"ID\"]):\n command = LogicCommandManager.createCommand(fields[\"Commands\"][i][\"ID\"])\n print(\"Command\", LogicCommandManager.getCommandsName(fields[\"Commands\"][i][\"ID\"]))\n if command is not None:\n fields[\"Commands\"][i][\"Fields\"] = command.decode(self)\n fields[\"Commands\"][i][\"Instance\"] = command\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n for command in fields[\"Commands\"]:\n if \"Instance\" not in command.keys():\n return\n\n if hasattr(command[\"Instance\"], 
'execute'):\n command[\"Instance\"].execute(calling_instance, command[\"Fields\"], cryptoInit)\n if command[\"ID\"] == 519:\n Messaging.sendMessage(24104, {\"Socket\": calling_instance.client, \"ServerChecksum\": 0, \"ClientChecksum\": 0, \"Tick\": 0}, cryptoInit)\n\n def getMessageType(self):\n return 14102\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GoHomeFromOfflinePractiseMessage", "path": "Heart/Packets/Client/Home/GoHomeFromOfflinePractiseMessage.py", "snippet": "class GoHomeFromOfflinePractiseMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14109\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GoHomeMessage", "path": "Heart/Packets/Client/Home/GoHomeMessage.py", "snippet": "class GoHomeMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 17750\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GetPlayerProfileMessage", "path": "Heart/Packets/Client/Home/GetPlayerProfileMessage.py", "snippet": "class GetPlayerProfileMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"BattleInfoBoolean\"] = self.readBoolean()\n if fields[\"BattleInfoBoolean\"]:\n fields[\"unk1\"] = self.readVInt()\n fields[\"AnotherID\"] = self.readLong()\n fields[\"unk2\"] = self.readVInt()\n for i in self.readVInt():\n fields[\"CsvID\"] = self.readDataReference()\n fields[\"unk3\"] = self.readVInt()\n fields[\"unk4\"] = self.readVInt()\n fields[\"unk5\"] = self.readVInt()\n fields[\"unk6\"] = self.readVInt()\n fields[\"PlayerName\"] = self.readString()\n fields[\"unk7\"] = self.readVInt()\n fields[\"Thumbnail\"] = self.readVInt()\n fields[\"NameColor\"] = self.readVInt()\n fields[\"unk10\"] = self.readVInt()\n fields[\"unk11\"] = self.readVInt()\n fields[\"PlayerHighID\"] = self.readInt()\n fields[\"PlayerLowID\"] = self.readInt()\n super().decode(fields)\n\n\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24113, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 15081\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AskForAllianceDataMessage", "path": "Heart/Packets/Client/Home/AskForAllianceDataMessage.py", "snippet": "class AskForAllianceDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"id\"] = self.readVLong()\n fields[\"isInAlliance\"] = 
self.readBoolean()\n if fields[\"isInAlliance\"] == True:\n fields[\"anotherIDHigh\"] = self.readVInt()\n fields[\"anotherIDLow\"] = self.readVInt()\n super().decode(fields)\n\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24301, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14302\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "KeepAliveMessage", "path": "Heart/Packets/Client/Socket/KeepAliveMessage.py", "snippet": "class KeepAliveMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(20108, fields, cryptoInit)\n\n def getMessageType(self):\n return 10108\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginFailedMessage", "path": "Heart/Packets/Server/Authentification/LoginFailedMessage.py", "snippet": "class LoginFailedMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeInt(fields['ErrorID'])\n self.writeString(fields['FingerprintData'])\n self.writeString()\n self.writeString(fields['ContentURL'])\n self.writeString()\n self.writeString(fields['Message'])\n self.writeInt(0)\n self.writeBoolean(False)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeString()\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeBoolean(True)\n self.writeString()\n self.writeVInt(0)\n self.writeString()\n self.writeBoolean(False)\n\n def decode(self):\n fields = {}\n fields[\"ErrorCode\"] = self.readInt()\n fields[\"ResourceFingerprintData\"] = self.readString()\n fields[\"RedirectDomain\"] = self.readString()\n fields[\"ContentURL\"] = self.readString()\n fields[\"UpdateURL\"] = self.readString()\n fields[\"Reason\"] = self.readString()\n fields[\"SecondsUntilMaintenanceEnd\"] = self.readInt()\n fields[\"ShowContactSupportForBan\"] = self.readBoolean()\n fields[\"CompressedFingerprintData\"] = self.readBytesWithoutLength()\n fields[\"ContentURLListCount\"] = self.readInt()\n fields[\"ContentURLList\"] = []\n for i in range(fields[\"ContentURLListCount\"]):\n fields[\"ContentURLList\"].append(self.readString())\n fields[\"KunlunAppStore\"] = self.readInt()\n fields[\"MaintenanceType\"] = self.readInt()\n fields[\"HelpshiftFaqId\"] = self.readString()\n fields[\"Tier\"] = self.readInt()\n fields[\"Unk1\"] = self.readBoolean()\n fields[\"Unk2\"] = self.readBoolean()\n fields[\"Unk3\"] = self.readString()\n fields[\"Unk4\"] = self.readVInt()\n fields[\"Unk5\"] = self.readString()\n fields[\"OptionalTargetedAccountIdState\"] = self.readBoolean()\n if fields[\"OptionalTargetedAccountIdState\"] == True:\n fields[\"OptionalTargetedAccountId\"] = self.readLong()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20103\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginOkMessage", "path": "Heart/Packets/Server/Authentification/LoginOkMessage.py", "snippet": "class LoginOkMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n 
self.messageVersion = 1\n\n def encode(self, fields, player):\n self.writeLong(player.ID[0], player.ID[1])\n self.writeLong(player.ID[0], player.ID[1])\n self.writeString(player.Token)\n self.writeString()\n self.writeString()\n self.writeInt(53)\n self.writeInt(176)\n self.writeInt(1)\n self.writeString(\"dev\")\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeString()\n self.writeString()\n self.writeString()\n self.writeInt(0)\n self.writeString()\n self.writeString(\"RU\")\n self.writeString()\n self.writeInt(0)\n self.writeString()\n self.writeInt(2)\n self.writeString('https://game-assets.brawlstarsgame.com')\n self.writeString('http://a678dbc1c015a893c9fd-4e8cc3b1ad3a3c940c504815caefa967.r87.cf2.rackcdn.com')\n self.writeInt(2)\n self.writeString('https://event-assets.brawlstars.com')\n self.writeString('https://24b999e6da07674e22b0-8209975788a0f2469e68e84405ae4fcf.ssl.cf2.rackcdn.com/event-assets')\n self.writeVInt(0)\n self.writeCompressedString(b'')\n self.writeBoolean(True)\n self.writeBoolean(False)\n self.writeString()\n self.writeString()\n self.writeString()\n self.writeString('https://play.google.com/store/apps/details?id=com.supercell.brawlstars')\n self.writeString()\n self.writeBoolean(False)\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n\n def decode(self):\n fields = {}\n fields[\"AccountID\"] = self.readLong()\n fields[\"HomeID\"] = self.readLong()\n fields[\"PassToken\"] = self.readString()\n fields[\"FacebookID\"] = self.readString()\n fields[\"GamecenterID\"] = self.readString()\n fields[\"ServerMajorVersion\"] = self.readInt()\n fields[\"ContentVersion\"] = self.readInt()\n fields[\"ServerBuild\"] = self.readInt()\n fields[\"ServerEnvironment\"] = self.readString()\n fields[\"SessionCount\"] = self.readInt()\n fields[\"PlayTimeSeconds\"] = self.readInt()\n fields[\"DaysSinceStartedPlaying\"] = self.readInt()\n fields[\"FacebookAppID\"] = self.readString()\n fields[\"ServerTime\"] = self.readString()\n fields[\"AccountCreatedDate\"] = self.readString()\n fields[\"StartupCooldownSeconds\"] = self.readInt()\n fields[\"GoogleServiceID\"] = self.readString()\n fields[\"LoginCountry\"] = self.readString()\n fields[\"KunlunID\"] = self.readString()\n fields[\"Tier\"] = self.readInt()\n fields[\"TencentID\"] = self.readString()\n\n ContentUrlCount = self.readInt()\n fields[\"GameAssetsUrls\"] = []\n for i in range(ContentUrlCount):\n fields[\"GameAssetsUrls\"].append(self.readString())\n\n EventUrlCount = self.readInt()\n fields[\"EventAssetsUrls\"] = []\n for i in range(EventUrlCount):\n fields[\"EventAssetsUrls\"].append(self.readString())\n\n fields[\"SecondsUntilAccountDeletion\"] = self.readVInt()\n fields[\"SupercellIDToken\"] = self.readCompressedString()\n fields[\"IsSupercellIDLogoutAllDevicesAllowed\"] = self.readBoolean()\n fields[\"isSupercellIDEligible\"] = self.readBoolean()\n fields[\"LineID\"] = self.readString()\n fields[\"SessionID\"] = self.readString()\n fields[\"KakaoID\"] = self.readString()\n fields[\"UpdateURL\"] = self.readString()\n fields[\"YoozooPayNotifyUrl\"] = self.readString()\n fields[\"UnbotifyEnabled\"] = self.readBoolean()\n\n Unknown1 = self.readBoolean()\n fields[\"Unknown1\"] = Unknown1\n if Unknown1:\n fields[\"Unknown2\"] = self.readString()\n\n Unknown3 = self.readBoolean()\n fields[\"Unknown3\"] = Unknown1\n 
if Unknown3:\n fields[\"Unknown4\"] = self.readString()\n\n Unknown5 = self.readBoolean()\n fields[\"Unknown5\"] = Unknown1\n if Unknown5:\n fields[\"Unknown6\"] = self.readString()\n\n Unknown7 = self.readBoolean()\n fields[\"Unknown7\"] = Unknown1\n if Unknown7:\n fields[\"Unknown8\"] = self.readString()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20104\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "OutOfSyncMessage", "path": "Heart/Packets/Server/Authentification/OutOfSyncMessage.py", "snippet": "class OutOfSyncMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeVInt(fields[\"ServerChecksum\"])\n self.writeVInt(fields[\"ClientChecksum\"])\n self.writeVInt(fields[\"Tick\"])\n\n def decode(self):\n fields = {}\n fields[\"ServerChecksum\"] = self.readVInt()\n fields[\"ClientChecksum\"] = self.readVInt()\n fields[\"Tick\"] = self.readVInt()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24104\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "ServerHelloMessage", "path": "Heart/Packets/Server/Authentification/ServerHelloMessage.py", "snippet": "class ServerHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeBytes(urandom(24), 24)\n\n def decode(self):\n fields = {}\n fields[\"Random\"] = self.readBytesWithoutLength()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20100\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "BattleEndMessage", "path": "Heart/Packets/Server/Battle/BattleEndMessage.py", "snippet": "class BattleEndMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeLong(0, 0) # Battle UUID High\n self.writeLong(0, 0) # Battle UUID Low\n self.writeVInt(2) # Battle End Game Mode (gametype)\n self.writeVInt(fields[\"Rank\"]) # Result (Victory/Defeat/Draw/Rank Score)\n self.writeVInt(0) # Tokens Gained (Gained Keys)\n self.writeVInt(0) # Trophies Result (Metascore change)\n self.writeVInt(0) # Power Play Points Gained (Pro League Points)\n self.writeVInt(0) # Doubled Tokens (Double Keys)\n self.writeVInt(0) # Double Token Event (Double Event Keys)\n self.writeVInt(0) # Token Doubler Remaining (Double Keys Remaining)\n self.writeVInt(0) # game Lenght In Seconds\n self.writeVInt(0) # Epic Win Power Play Points Gained (op Win Points)\n self.writeVInt(0) # Championship Level Reached (CC Wins)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(True)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeBoolean(False)\n\n self.writeVInt(fields[\"HeroesCount\"])\n for heroEntry in fields[\"Heroes\"]:\n self.writeBoolean(heroEntry[\"IsPlayer\"])\n 
self.writeBoolean(bool(heroEntry[\"Team\"]))\n self.writeBoolean(bool(heroEntry[\"Team\"]))\n self.writeByte(1)\n for i in range(1):\n self.writeDataReference(heroEntry[\"Brawler\"][\"ID\"][0], heroEntry[\"Brawler\"][\"ID\"][1])\n self.writeByte(1)\n for i in range(1):\n if (heroEntry[\"Brawler\"][\"SkinID\"] is None):\n self.writeVInt(0)\n else:\n self.writeDataReference(heroEntry[\"Brawler\"][\"SkinID\"][0], heroEntry[\"Brawler\"][\"SkinID\"][1])\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(1250)\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(11)\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(0)\n\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeBoolean(heroEntry[\"IsPlayer\"])\n if heroEntry[\"IsPlayer\"]:\n self.writeLong(player.ID[0], player.ID[1])\n self.writeString(heroEntry[\"PlayerName\"])\n self.writeVInt(100)\n self.writeVInt(28000000)\n self.writeVInt(43000000)\n self.writeVInt(-2)\n if heroEntry[\"IsPlayer\"]:\n self.writeBoolean(True)\n self.writeVLong(5, 4181497)\n self.writeString('haccer club')\n self.writeDataReference(8, 16)\n else:\n self.writeBoolean(False)\n\n self.writeInt8(1)\n self.writeVInt(5978)\n self.writeInt8(1)\n self.writeVInt(0)\n\n self.writeInt16(5)\n self.writeInt16(3)\n self.writeInt(27328)\n self.writeInt(25659)\n\n self.writeDataReference(0)\n\n self.writeVInt(0)\n self.writeVInt(1)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n\n def decode(self):\n fields = {}\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 23456\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AvailableServerCommandMessage", "path": "Heart/Packets/Server/Home/AvailableServerCommandMessage.py", "snippet": "class AvailableServerCommandMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(fields[\"Command\"][\"ID\"])\n command = LogicCommandManager.createCommand(fields[\"Command\"][\"ID\"], self.messagePayload)\n self.messagePayload = command.encode(fields)\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24111\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LobbyInfoMessage", "path": "Heart/Packets/Server/Home/LobbyInfoMessage.py", "snippet": "class LobbyInfoMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(ClientsManager.GetCount())\n self.writeString(f\"\"\"Version: 
{player.ClientVersion}\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\"\"\")\n self.writeVInt(0) # count event\n self.writeVInt(0) # new timer in v51\n\n def decode(self):\n fields = {}\n fields[\"PlayerCount\"] = self.readVInt()\n fields[\"Text\"] = self.readString()\n fields[\"Unk1\"] = self.readVInt()\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 23457\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "OwnHomeDataMessage", "path": "Heart/Packets/Server/Home/OwnHomeDataMessage.py", "snippet": "class OwnHomeDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(1688816070)\n self.writeVInt(1191532375)\n self.writeVInt(2023189)\n self.writeVInt(73530)\n\n self.writeVInt(player.Trophies)\n self.writeVInt(player.HighestTrophies)\n self.writeVInt(player.HighestTrophies) \n self.writeVInt(player.TrophyRoadTier)\n self.writeVInt(player.Experience)\n self.writeDataReference(28, player.Thumbnail)\n self.writeDataReference(43, player.Namecolor)\n\n self.writeVInt(26)\n for x in range(26):\n self.writeVInt(x)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n \n self.writeVInt(len(player.OwnedSkins))\n for x in player.OwnedSkins:\n self.writeDataReference(29, x)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n self.writeVInt(player.HighestTrophies)\n self.writeVInt(0)\n self.writeVInt(2)\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeVInt(115)\n self.writeVInt(335442)\n self.writeVInt(1001442)\n self.writeVInt(5778642) \n\n self.writeVInt(120)\n self.writeVInt(200)\n self.writeVInt(0)\n\n self.writeBoolean(True)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(1) # Shop Offers\n\n self.writeVInt(1) # RewardCount\n\n self.writeVInt(38) # ItemType\n self.writeVInt(1337) # Amount\n self.writeDataReference(0) # CsvID\n self.writeVInt(0) # SkinID\n\n self.writeVInt(0) # Currency(0-Gems, 1-Gold, 3-StarpoInts)\n self.writeVInt(0) # Cost\n self.writeVInt(0) # Time\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # Daily Offer\n self.writeVInt(0) # Old price\n self.writeString('Offer') # Text\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeString(\"offer_bgr_xmas23\") # Background\n self.writeVInt(0)\n self.writeBoolean(False) # This purchase is already being processed\n self.writeVInt(0) # Type Benefit\n self.writeVInt(0) # Benefit\n self.writeString()\n self.writeBoolean(False) # One time offer\n self.writeBoolean(False) # Claimed\n self.writeDataReference(0)\n self.writeDataReference(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n \n self.writeVInt(20)\n 
self.writeVInt(1428)\n\n self.writeVInt(0)\n\n self.writeVInt(1)\n self.writeVInt(30)\n\n self.writeByte(1) # count brawlers selected\n self.writeDataReference(16, player.SelectedBrawlers[0]) # selected brawler\n self.writeString(player.Region) # location\n self.writeString(player.ContentCreator) # supported creator\n\n self.writeVInt(6) \n self.writeVInt(1) \n self.writeVInt(9) \n self.writeVInt(1) \n self.writeVInt(22) \n self.writeVInt(3) \n self.writeVInt(25) \n self.writeVInt(1) \n self.writeVInt(24) \n self.writeVInt(0)\n self.writeVInt(15)\n self.writeVInt(32447)\n self.writeVInt(28)\n\n\n self.writeVInt(0)\n\n self.writeVInt(1)\n for season in range(1):\n self.writeVInt(22-1)\n self.writeVInt(40000)\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n\n self.writeVInt(0)\n\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(0) \n\n self.writeBoolean(True) # Vanity items\n self.writeVInt(len(player.OwnedThumbnails)+len(player.OwnedPins))\n for x in player.OwnedThumbnails:\n self.writeVInt(28)\n self.writeVInt(x)\n self.writeVInt(0)\n for x in player.OwnedPins:\n self.writeVInt(52)\n self.writeVInt(x)\n self.writeVInt(0)\n\n\n self.writeBoolean(False) # Power league season data\n\n self.writeInt(0)\n self.writeVInt(0)\n self.writeVInt(16)\n self.writeVInt(76)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(2023189)\n\n self.writeVInt(35) # event slot id\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(3)\n self.writeVInt(4)\n self.writeVInt(5)\n self.writeVInt(6)\n self.writeVInt(7)\n self.writeVInt(8)\n self.writeVInt(9)\n self.writeVInt(10)\n self.writeVInt(11)\n self.writeVInt(12)\n self.writeVInt(13) \n self.writeVInt(14)\n self.writeVInt(15)\n self.writeVInt(16)\n self.writeVInt(17)\n self.writeVInt(18) \n self.writeVInt(19)\n self.writeVInt(20)\n self.writeVInt(21) \n self.writeVInt(22)\n self.writeVInt(23)\n self.writeVInt(24)\n self.writeVInt(25)\n self.writeVInt(26)\n self.writeVInt(27)\n self.writeVInt(28)\n self.writeVInt(29)\n self.writeVInt(30)\n self.writeVInt(31)\n self.writeVInt(32)\n self.writeVInt(33)\n self.writeVInt(34)\n self.writeVInt(35)\n\n self.writeVInt(1)\n\n self.writeVInt(4)\n self.writeVInt(7)\n self.writeVInt(1)\n self.writeVInt(0)\n self.writeVInt(72292)\n self.writeVInt(10) \n self.writeDataReference(15, 21) # map id\n self.writeVInt(-1)\n self.writeVInt(2)\n self.writeString(\"\")\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # MapMaker map structure array\n self.writeVInt(0)\n self.writeBoolean(False) # Power League array entry\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeVInt(0) \n self.writeVInt(0) \n self.writeVInt(0) \n self.writeBoolean(False) \n\n self.writeVInt(0)\n \n ByteStreamHelper.encodeIntList(self, [20, 35, 75, 140, 290, 480, 800, 1250, 1875, 2800])\n ByteStreamHelper.encodeIntList(self, [30, 80, 170, 360]) # Shop 
Coins Price\n ByteStreamHelper.encodeIntList(self, [300, 880, 2040, 4680]) # Shop Coins Amount\n\n self.writeVInt(0) \n\n self.writeVInt(1)\n self.writeVInt(41000086) # theme\n self.writeVInt(1)\n\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(-1)\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(4)\n\n ByteStreamHelper.encodeIntList(self, [0, 29, 79, 169, 349, 699])\n ByteStreamHelper.encodeIntList(self, [0, 160, 450, 500, 1250, 2500])\n\n self.writeLong(0, 1) # Player ID\n\n self.writeVInt(0) # Notification factory\n \n self.writeVInt(1)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeBoolean(False) # Login Calendar\n self.writeVInt(0)\n self.writeBoolean(True) # Starr Road\n for i in range(7):\n self.writeVInt(0)\n\n self.writeVInt(0) # Mastery\n\n #BattleCard\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n\n self.writeVInt(0) #Brawler's BattleCards\n\n self.writeVInt(5)\n for i in range(5):\n self.writeDataReference(80, i)\n self.writeVInt(-1)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeInt(0)\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeVInt(86400*24)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeBoolean(False)\n\n # end LogicClientHome\n\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeStringReference(player.Name)\n self.writeBoolean(player.Registered)\n self.writeInt(-1)\n\n self.writeVInt(17)\n unlocked_brawler = [i['CardID'] for x,i in player.OwnedBrawlers.items()]\n self.writeVInt(len(unlocked_brawler) + 2)\n for x in unlocked_brawler:\n self.writeDataReference(23, x)\n self.writeVInt(-1)\n self.writeVInt(1)\n\n self.writeDataReference(5, 8)\n self.writeVInt(-1)\n self.writeVInt(player.Coins)\n\n self.writeDataReference(5, 23)\n self.writeVInt(-1)\n self.writeVInt(player.Blings)\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroScore\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"Trophies\"])\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroHighScore\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"HighestTrophies\"])\n\n self.writeVInt(0) # Array\n\n self.writeVInt(0) # HeroPower\n \n self.writeVInt(len(player.OwnedBrawlers)) # HeroLevel\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"PowerLevel\"]-1)\n\n self.writeVInt(0) # hero star power gadget and hypercharge\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroSeenState\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(2)\n\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n\n self.writeVInt(player.Gems) # Diamonds\n self.writeVInt(player.Gems) # Free Diamonds\n self.writeVInt(10) # Player Level\n self.writeVInt(100)\n self.writeVInt(0) # 
CumulativePurchasedDiamonds or Avatar User Level Tier | 10000 < Level Tier = 3 | 1000 < Level Tier = 2 | 0 < Level Tier = 1\n self.writeVInt(100) # Battle Count\n self.writeVInt(10) # WinCount\n self.writeVInt(80) # LoseCount\n self.writeVInt(50) # WinLooseStreak\n self.writeVInt(20) # NpcWinCount\n self.writeVInt(0) # NpcLoseCount\n self.writeVInt(2) # TutorialState | shouldGoToFirstTutorialBattle = State == 0\n self.writeVInt(12)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeString()\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(1)\n\n def decode(self):\n fields = {}\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24101\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "KeepAliveServerMessage", "path": "Heart/Packets/Server/Socket/KeepAliveServerMessage.py", "snippet": "class KeepAliveServerMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20108\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "PlayerProfileMessage", "path": "Heart/Packets/Server/Home/PlayerProfileMessage.py", "snippet": "class PlayerProfileMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVLong(fields[\"PlayerHighID\"], fields[\"PlayerLowID\"])\n self.writeDataReference(16,11) # \n self.writeVInt(70)\n for i in range(70):\n self.writeDataReference(16, i)\n self.writeDataReference(0)\n self.writeVInt(500) # trophies\n self.writeVInt(1250) # highestTrophies\n self.writeVInt(11) #power level\n \n self.writeVInt(18)\n\n self.writeVInt(1) \n self.writeVInt(1) # 3v3 victories\n\n self.writeVInt(2)\n self.writeVInt(528859) # total exp\n\n self.writeVInt(3)\n self.writeVInt(3) # current trophies\n\n self.writeVInt(4)\n self.writeVInt(4) # highest trophies\n\n self.writeVInt(5) \n self.writeVInt(5) # unlocked brawler?\n\n self.writeVInt(8)\n self.writeVInt(6) # solo victories\n\n self.writeVInt(11) \n self.writeVInt(7) # duo victories\n\n self.writeVInt(9) \n self.writeVInt(8) # highest level robo rumble\n\n self.writeVInt(12) \n self.writeVInt(9) # highest level boss fight\n\n self.writeVInt(13)\n self.writeVInt(10) # highest power league points\n\n self.writeVInt(14)\n self.writeVInt(11) # some power league stuff\n\n self.writeVInt(15)\n self.writeVInt(12) # most challenge win\n\n self.writeVInt(16) #highest level city rampage\n self.writeVInt(13)\n\n self.writeVInt(18) #highest solo power league rank\n self.writeVInt(14)\n\n self.writeVInt(17) #highest team power league rank\n self.writeVInt(15)\n\n self.writeVInt(19) # highest Club league rank\n self.writeVInt(16)\n\n self.writeVInt(20) # number fame\n self.writeVInt(1000)\n\n self.writeVInt(21)\n self.writeVInt(502052) #v50\n\n self.writeString(player.Name) #PlayerInfo\n self.writeVInt(100)\n self.writeVInt(28000000 + player.Thumbnail)\n self.writeVInt(43000000 + player.Namecolor)\n self.writeVInt(14)\n\n self.writeBoolean(True)\n self.writeVInt(300)\n\n self.writeString(\"hello world\")\n self.writeVInt(100)\n self.writeVInt(200)\n self.writeDataReference(29, 558)\n self.writeDataReference(0)\n self.writeDataReference(0)\n self.writeDataReference(0)\n 
self.writeDataReference(0)\n\n self.writeBoolean(True) #alliance\n self.writeLong(0,1) #alliance ID\n self.writeString(\"haccers\") #alliance name\n self.writeDataReference(8,1) # alliance icon\n self.writeVInt(1) # type\n self.writeVInt(1) # member count\n self.writeVInt(10000) # total trophies\n self.writeVInt(1) # minimum trophies to enter\n self.writeDataReference(0)\n self.writeString(\"RU\") #location\n self.writeVInt(4) # unknown\n self.writeBoolean(True) #is Family friendly\n self.writeVInt(0)\n \n\n self.writeDataReference(25, 1) #alliance role\n self.writeVInt(16)\n\n def decode(self):\n pass\n # fields = {}\n # fields[\"PlayerCount\"] = self.readVInt()\n # fields[\"Text\"] = self.readString()\n # fields[\"Unk1\"] = self.readVInt()\n # super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24113\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "MyAllianceMessage", "path": "Heart/Packets/Server/Home/MyAllianceMessage.py", "snippet": "class MyAllianceMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(1) # Online people in alliance\n self.writeBoolean(True) # isInAlliance\n self.writeDataReference(25, 4)\n self.writeLong(0, 1) # alliance ID\n self.writeString(player.ContentCreator) # alliance name\n self.writeDataReference(8, 37) # alliance icon\n self.writeVInt(3) # type\n self.writeVInt(1) # member count\n self.writeVInt(9500) # total trophies\n self.writeVInt(1) # minimum trophies to enter\n self.writeVInt(0) # 0\n self.writeString('RU') # location\n self.writeVInt(3) # unknown\n self.writeBoolean(True) # isFamilyFriendly\n self.writeVInt(0)\n\n def decode(self):\n fields = {}\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24399\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AllianceDataMessage", "path": "Heart/Packets/Server/Home/AllianceDataMessage.py", "snippet": "class AllianceDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeBoolean(True)\n\n self.writeLong(0, 1) # alliance ID\n self.writeString(player.ContentCreator) # alliance name\n self.writeDataReference(8, 37) # alliance icon\n self.writeVInt(1) # type\n self.writeVInt(1) # member count\n self.writeVInt(player.Trophies) # total trophies\n self.writeVInt(0) # minimum trophies to enter\n self.writeVInt(0) # 0\n self.writeString('RU') # location\n self.writeVInt(1) # people online\n self.writeBoolean(True) # isFamilyFriendly\n self.writeVInt(0)\n\n self.writeString(\"this is the hacciest club in the world\")\n\n self.writeVInt(1) # member count\n self.writeLong(player.ID[0], player.ID[1]) # player ID\n self.writeVInt(2) # role\n self.writeVInt(player.Trophies) # trophies\n self.writeVInt(0) # status: 0=offline 2=online\n self.writeVInt(1) # last connected time seconds ?\n highestPowerLeagueRank = 2\n self.writeVInt(highestPowerLeagueRank)\n if highestPowerLeagueRank != 0:\n self.writeVInt(2) #solo\n self.writeVInt(1) #duo\n self.writeBoolean(False) # boolean always false?\n\n self.writeString(player.Name) # player name\n self.writeVInt(100) # VInt always 100\n self.writeVInt(28000000 + player.Thumbnail) # thumbnail\n self.writeVInt(43000000 + 
player.Namecolor) # name color\n self.writeVInt(46000000 + player.Namecolor)\n\n self.writeVInt(-1) # most people have it -1 but some with something\n self.writeBoolean(False) # whats this ? only 2/30 people have it true in my club\n week = 58 # week 58 of club league as of 2023/07/05, this number is 0 if you just arrived in the club\n self.writeVInt(week)\n if week != 0: # club league week number?\n self.writeVInt(3) # day\n self.writeVInt(18) # total club trophies earned\n self.writeVInt(0) # event day club trophies earned\n self.writeVInt(8) # total tickets used\n self.writeVInt(0) # event day tickets used\n self.writeVInt(6) # event day max tickets\n self.writeVInt(6) # event day tickets left\n self.writeVInt(0) # event day player ranking\n self.writeBoolean(True) # everyone have it to true\n self.writeVInt(200) # player experience lvl but why tf it doesn't show for some people\n\n def decode(self):\n fields = {}\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24301\n\n def getMessageVersion(self):\n return self.messageVersion" } ]
from Heart.Packets.Client.Authentification.ClientHelloMessage import ClientHelloMessage
from Heart.Packets.Client.Authentification.LoginMessage import LoginMessage
from Heart.Packets.Client.Battle.AskForBattleEndMessage import AskForBattleEndMessage
from Heart.Packets.Client.Home.ChangeAvatarNameMessage import ChangeAvatarNameMessage
from Heart.Packets.Client.Home.EndClientTurnMessage import EndClientTurnMessage
from Heart.Packets.Client.Home.GoHomeFromOfflinePractiseMessage import GoHomeFromOfflinePractiseMessage
from Heart.Packets.Client.Home.GoHomeMessage import GoHomeMessage
from Heart.Packets.Client.Home.GetPlayerProfileMessage import GetPlayerProfileMessage
from Heart.Packets.Client.Home.AskForAllianceDataMessage import AskForAllianceDataMessage
from Heart.Packets.Client.Socket.KeepAliveMessage import KeepAliveMessage
from Heart.Packets.Server.Authentification.LoginFailedMessage import LoginFailedMessage
from Heart.Packets.Server.Authentification.LoginOkMessage import LoginOkMessage
from Heart.Packets.Server.Authentification.OutOfSyncMessage import OutOfSyncMessage
from Heart.Packets.Server.Authentification.ServerHelloMessage import ServerHelloMessage
from Heart.Packets.Server.Battle.BattleEndMessage import BattleEndMessage
from Heart.Packets.Server.Home.AvailableServerCommandMessage import AvailableServerCommandMessage
from Heart.Packets.Server.Home.LobbyInfoMessage import LobbyInfoMessage
from Heart.Packets.Server.Home.OwnHomeDataMessage import OwnHomeDataMessage
from Heart.Packets.Server.Socket.KeepAliveServerMessage import KeepAliveServerMessage
from Heart.Packets.Server.Home.PlayerProfileMessage import PlayerProfileMessage
from Heart.Packets.Server.Home.MyAllianceMessage import MyAllianceMessage
from Heart.Packets.Server.Home.AllianceDataMessage import AllianceDataMessage
15,770
        10513: 'AskForPlayingFacebookFriendsMessage',
        10514: 'AskForPlayingKakaoFriendsMessage',
        10515: 'AskForPlayingTencentFriendsMessage',
        10516: 'AskForPlayingLineFriendsMessage',
        10517: 'AskForPlayingSupercellFriendsMessage',
        10523: 'YoozooBillingRequestMessage',
        10555: 'ClientInputMessage',
        10576: 'SetBlockFriendRequestsMessage',
        10599: 'AskForFriendSuggestionsMessage',
        10636: 'SCIDBindAccountMessage',
        11736: 'SCIDLogoutAllDevicesMessage',
        12100: 'CreatePlayerMapMessage',
        12101: 'DeletePlayerMapMessage',
        12102: 'GetPlayerMapsMessage',
        12103: 'UpdatePlayerMapMessage',
        12104: 'SubmitPlayerMapMessage',
        12105: 'PublishPlayerMapMessage',
        12106: 'ChangePlayerMapNameMessage',
        12107: 'EnterMapEditorMessage',
        12108: 'GoHomeFromMapEditorMessage',
        12110: 'TeamSetPlayerMapMessage',
        12111: 'SignoffPlayerMapMessage',
        12125: 'ReportPlayerMapMessage',
        12152: 'RankedMatchBanHeroMessage',
        12155: 'RankedMatchPickHeroMessage',
        12157: 'RankedMatchUpdateHeroDataMessage',
        12905: 'GetCurrentBattleReplayDataMessage',
        12998: 'SetCountryMessage',
        13922: 'AcceptTokenFriendMessage',
        14101: GoHomeMessage,
        14102: EndClientTurnMessage,
        14103: 'StartGameMessage',
        14104: 'StartSpectateMessage',
        14105: 'HomeLogicStoppedMessage',
        14106: 'CancelMatchmakingMessage',
        14107: 'StopSpectateMessage',
        14108: 'GoHomeFromSpectateMessage',
        #14109: GoHomeFromOfflinePractiseMessage, //before v50
        14110: AskForBattleEndMessage,
        #14113: GetPlayerProfileMessage, //before v50
        14114: 'GetBattleLogMessage',
        14115: 'BattleLogViewReplayMessage',
        14116: 'ViewReplayByStringMessage',
        14117: 'RequestMatchCancelMessage',
        14118: 'SinglePlayerMatchRequestMessage',
        14166: 'ChronosEventSeenMessage',
        14167: 'ChronosEventSeenMessage',
        14177: 'PlayAgainMessage',
        14178: 'DebugCommandMessage',
        14199: 'LookForGameRoomRequestMessage',
        14211: 'UnbindFacebookAccountMessage',
        14201: 'BindFacebookAccountMessage',
        14202: 'BindKakaoAccountMessage',
        14203: 'BingLineAccountMessage',
        14212: 'BindGamecenterAccountMessage',
        14213: 'UnbindKakaoAccountMessage',
        14214: 'UnbindLineAccountMessage',
        14262: 'BindGoogleServiceAccountMessage',
        14266: 'BindTencentAccountMessage',
        14268: 'TencentCheckCanPayMessage',
        14276: 'TencentAntiAddictionInstructionExecutedMessage',
        14277: 'GetSeasonRewardsMessage',
        14299: 'SetAllianceCountryMessage',
        14301: 'CreateAllianceMessage',
        14302: AskForAllianceDataMessage,
        14303: 'AskForJoinableAlliancesListMessage',
        14304: 'AskForAllianceStreamMessage',
        14305: 'JoinAllianceMessage',
        14306: 'ChangeAllianceMemberRoleMessage',
        14307: 'KickAllianceMemberMessage',
        14308: 'LeaveAllianceMessage',
        14315: 'ChatToAllianceStreamMessage',
        14316: 'ChangeAllianceSettingsMessage',
        14317: 'RequestJoinAllianceMessage',
        14321: 'RespondToAllianceJoinRequestMessage',
        14322: 'SendAllianceInvitationMessage',
        14323: 'JoinAllianceUsingInvitationMessage',
        14324: 'SearchAlliancesMessage',
        14326: 'SendAllianceInvitationToFriendMessage',
        14330: 'SendAllianceMailMessage',
        14350: 'TeamCreateMessage',
        14351: 'TeamJoinMessage',
        14352: 'TeamKickMessage',
        14353: 'TeamLeaveMessage',
        14354: 'TeamChangeMemberSettingsMessage',
        14355: 'TeamSetMemberReadyMessage',
        14356: 'TeamTogglePractiseMessage',
        14357: 'TeamToggleMemberSideMessage',
        14358: 'TeamSpectateMessage',
        14359: 'TeamChatMessage',
        14360: 'TeamPostAdMessage',
        14361: 'TeamMemberStatusMessage',
        14362: 'TeamSetEventMessage',
        14363: 'TeamSetLocationMessage',
        14364: 'TeamReportChatMessage',
        14365: 'TeamInviteMessage',
        14366: 'PlayerStatusMessage',
        14367: 'TeamClearInviteMessage',
        14368: 'TeamInviteResponseMessage',
        14369: 'TeamPremadeChatMessage',
        14370: 'TeamAllianceMemberInviteMessage',
        14371: 'TeamJoinOrCreateGameRoomMessage',
        14372: 'TeamToggleSettingsMessage',
        14373: 'TeamBotSlotDisableMessage',
        14403: 'GetLeaderboardMessage',
        14405: 'AskForAvatarStreamMessage',
        14406: 'AskForBattleReplayStreamMessage',
        14418: 'RemoveAvatarStreamEntryMessage',
        14469: 'AlliancePremadeChatMessage',
        14479: 'TeamInvitationResponseMessage',
        14600: 'AvatarNameCheckRequestMessage',
        14700: 'ListBrawlTvChannelsMessage',
        14701: 'TuneBrawlTvChannelMessage',
        14715: 'SendGlobalChatLineMessage',
        14777: 'SetInvitesBlockedMessage',
        14778: 'SetTeamChatMutedMessage',
        14867: 'SetRegionMessage',
        14880: 'TeamRequestJoinCancelMessage',
        14881: 'TeamRequestJoinMessage',
        14882: 'TeamRequestJoinApproveMessage',
class LogicLaserMessageFactory:
    messagesList = {
        10055: 'AskPlayerJWTokenMessage',
        10099: 'ClientCryptoErrorMessage',
        10100: ClientHelloMessage,
        10101: LoginMessage,
        10102: 'LoginUsingSessionMessage',
        10103: 'CreateAccountMessage',
        10107: 'ClientCapabilitiesMessage',
        10108: KeepAliveMessage,
        10109: 'UdpCheckConnectionMessage',
        10110: 'AnalyticEventMessage',
        10111: 'AccountIdentifiersMessage',
        10112: 'AuthenticationCheckMessage',
        10113: 'SetDeviceTokenMessage',
        10116: 'ResetAccountMessage',
        10117: 'ReportUserMessage',
        10118: 'AccountSwitchedMessage',
        10119: 'ReportAllianceStreamMessage',
        10121: 'UnlockAccountMessage',
        10150: 'AppleBillingRequestMessage',
        10151: 'GoogleBillingRequestMessage',
        10152: 'TencentBillingRequestMessage',
        10153: 'CafeBazaarBillingRequestMessage',
        10159: 'KunlunBillingRequestMessage',
        10160: 'BillingCancelledByClientMessage',
        10177: 'ClientInfoMessage',
        10212: ChangeAvatarNameMessage,
        10309: 'GetAllianceInviteTokenMessage',
        10321: 'AttributionEventMessage',
        10401: 'CreateGameMessage',
        10501: 'AcceptFriendMessage',
        10502: 'AddFriendMessage',
        10503: 'AskForAddableFriendsMessage',
        10504: 'AskForFriendListMessage',
        10506: 'RemoveFriendMessage',
        10507: 'AddFriendByEmailMessage',
        10509: 'AddFriendByAvatarNameAndCodeMessage',
        10512: 'AskForPlayingGamecenterFriendsMessage',
        10513: 'AskForPlayingFacebookFriendsMessage',
        10514: 'AskForPlayingKakaoFriendsMessage',
        10515: 'AskForPlayingTencentFriendsMessage',
        10516: 'AskForPlayingLineFriendsMessage',
        10517: 'AskForPlayingSupercellFriendsMessage',
        10523: 'YoozooBillingRequestMessage',
        10555: 'ClientInputMessage',
        10576: 'SetBlockFriendRequestsMessage',
        10599: 'AskForFriendSuggestionsMessage',
        10636: 'SCIDBindAccountMessage',
        11736: 'SCIDLogoutAllDevicesMessage',
        12100: 'CreatePlayerMapMessage',
        12101: 'DeletePlayerMapMessage',
        12102: 'GetPlayerMapsMessage',
        12103: 'UpdatePlayerMapMessage',
        12104: 'SubmitPlayerMapMessage',
        12105: 'PublishPlayerMapMessage',
        12106: 'ChangePlayerMapNameMessage',
        12107: 'EnterMapEditorMessage',
        12108: 'GoHomeFromMapEditorMessage',
        12110: 'TeamSetPlayerMapMessage',
        12111: 'SignoffPlayerMapMessage',
        12125: 'ReportPlayerMapMessage',
        12152: 'RankedMatchBanHeroMessage',
        12155: 'RankedMatchPickHeroMessage',
        12157: 'RankedMatchUpdateHeroDataMessage',
        12905: 'GetCurrentBattleReplayDataMessage',
        12998: 'SetCountryMessage',
        13922: 'AcceptTokenFriendMessage',
        14101: GoHomeMessage,
        14102: EndClientTurnMessage,
        14103: 'StartGameMessage',
        14104: 'StartSpectateMessage',
        14105: 'HomeLogicStoppedMessage',
        14106: 'CancelMatchmakingMessage',
        14107: 'StopSpectateMessage',
        14108: 'GoHomeFromSpectateMessage',
        #14109: GoHomeFromOfflinePractiseMessage, //before v50
        14110: AskForBattleEndMessage,
        #14113: GetPlayerProfileMessage, //before v50
        14114: 'GetBattleLogMessage',
        14115: 'BattleLogViewReplayMessage',
        14116: 'ViewReplayByStringMessage',
        14117: 'RequestMatchCancelMessage',
        14118: 'SinglePlayerMatchRequestMessage',
        14166: 'ChronosEventSeenMessage',
        14167: 'ChronosEventSeenMessage',
        14177: 'PlayAgainMessage',
        14178: 'DebugCommandMessage',
        14199: 'LookForGameRoomRequestMessage',
        14211: 'UnbindFacebookAccountMessage',
        14201: 'BindFacebookAccountMessage',
        14202: 'BindKakaoAccountMessage',
        14203: 'BingLineAccountMessage',
        14212: 'BindGamecenterAccountMessage',
        14213: 'UnbindKakaoAccountMessage',
        14214: 'UnbindLineAccountMessage',
        14262: 'BindGoogleServiceAccountMessage',
        14266: 'BindTencentAccountMessage',
        14268: 'TencentCheckCanPayMessage',
        14276: 'TencentAntiAddictionInstructionExecutedMessage',
        14277: 'GetSeasonRewardsMessage',
        14299: 'SetAllianceCountryMessage',
        14301: 'CreateAllianceMessage',
        14302: AskForAllianceDataMessage,
        14303: 'AskForJoinableAlliancesListMessage',
        14304: 'AskForAllianceStreamMessage',
        14305: 'JoinAllianceMessage',
        14306: 'ChangeAllianceMemberRoleMessage',
        14307: 'KickAllianceMemberMessage',
        14308: 'LeaveAllianceMessage',
        14315: 'ChatToAllianceStreamMessage',
        14316: 'ChangeAllianceSettingsMessage',
        14317: 'RequestJoinAllianceMessage',
        14321: 'RespondToAllianceJoinRequestMessage',
        14322: 'SendAllianceInvitationMessage',
        14323: 'JoinAllianceUsingInvitationMessage',
        14324: 'SearchAlliancesMessage',
        14326: 'SendAllianceInvitationToFriendMessage',
        14330: 'SendAllianceMailMessage',
        14350: 'TeamCreateMessage',
        14351: 'TeamJoinMessage',
        14352: 'TeamKickMessage',
        14353: 'TeamLeaveMessage',
        14354: 'TeamChangeMemberSettingsMessage',
        14355: 'TeamSetMemberReadyMessage',
        14356: 'TeamTogglePractiseMessage',
        14357: 'TeamToggleMemberSideMessage',
        14358: 'TeamSpectateMessage',
        14359: 'TeamChatMessage',
        14360: 'TeamPostAdMessage',
        14361: 'TeamMemberStatusMessage',
        14362: 'TeamSetEventMessage',
        14363: 'TeamSetLocationMessage',
        14364: 'TeamReportChatMessage',
        14365: 'TeamInviteMessage',
        14366: 'PlayerStatusMessage',
        14367: 'TeamClearInviteMessage',
        14368: 'TeamInviteResponseMessage',
        14369: 'TeamPremadeChatMessage',
        14370: 'TeamAllianceMemberInviteMessage',
        14371: 'TeamJoinOrCreateGameRoomMessage',
        14372: 'TeamToggleSettingsMessage',
        14373: 'TeamBotSlotDisableMessage',
        14403: 'GetLeaderboardMessage',
        14405: 'AskForAvatarStreamMessage',
        14406: 'AskForBattleReplayStreamMessage',
        14418: 'RemoveAvatarStreamEntryMessage',
        14469: 'AlliancePremadeChatMessage',
        14479: 'TeamInvitationResponseMessage',
        14600: 'AvatarNameCheckRequestMessage',
        14700: 'ListBrawlTvChannelsMessage',
        14701: 'TuneBrawlTvChannelMessage',
        14715: 'SendGlobalChatLineMessage',
        14777: 'SetInvitesBlockedMessage',
        14778: 'SetTeamChatMutedMessage',
        14867: 'SetRegionMessage',
        14880: 'TeamRequestJoinCancelMessage',
        14881: 'TeamRequestJoinMessage',
        14882: 'TeamRequestJoinApproveMessage',
15081: GetPlayerProfileMessage, #v50
7
2023-12-14 18:57:56+00:00
24k
GXNU-ZhongLab/ODTrack
lib/train/base_functions.py
[ { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n CVPR, 2019\n https://arxiv.org/pdf/1809.07845.pdf\n\n Download the dataset from https://cis.temple.edu/lasot/download.html\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_dir if root is None else root\n super().__init__('LaSOT', root, image_loader)\n\n # Keep a list of all classes\n self.class_list = [f for f in os.listdir(self.root)]\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n # sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n sequence_list = pandas.read_csv(file_path, header=None).squeeze(\"columns\").values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = 
os.path.join(seq_path, \"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n with open(out_of_view_file, 'r') as f:\n out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n\n target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(self.root, class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "Got10k", "path": "lib/train/dataset/got10k.py", "snippet": "class Got10k(BaseVideoDataset):\n \"\"\" GOT-10k dataset.\n\n Publication:\n GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\n Lianghua Huang, Xin Zhao, and Kaiqi Huang\n arXiv:1810.11981, 2018\n https://arxiv.org/pdf/1810.11981.pdf\n\n Download dataset from http://got-10k.aitestunion.com/downloads\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n not NOT the official got-10k validation split. To use the official validation split, provide that as\n the root folder instead.\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n options can be used at the same time.\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\n \"\"\"\n root = env_settings().got10k_dir if root is None else root\n super().__init__('GOT10k', root, image_loader)\n\n # all folders inside the root\n self.sequence_list = self._get_sequence_list()\n\n # seq_id is the index of the folder inside the got10k root path\n if split is not None:\n if seq_ids is not None:\n raise ValueError('Cannot set both split_name and seq_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')\n elif split == 'val':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')\n elif split == 'train_full':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_full_split.txt')\n elif split == 'vottrain':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')\n elif split == 'votval':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')\n else:\n raise ValueError('Unknown split name.')\n # seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze(\"columns\").values.tolist()\n elif seq_ids is None:\n seq_ids = list(range(0, len(self.sequence_list)))\n\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.sequence_meta_info = self._load_meta_info()\n self.seq_per_class = self._build_seq_per_class()\n\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def get_name(self):\n return 'got10k'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def _load_meta_info(self):\n sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}\n return sequence_meta_info\n\n def _read_meta(self, seq_path):\n try:\n with open(os.path.join(seq_path, 'meta_info.ini')) as f:\n meta_info = f.readlines()\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],\n 'motion_class': meta_info[6].split(': ')[-1][:-1],\n 'major_class': meta_info[7].split(': ')[-1][:-1],\n 'root_class': meta_info[8].split(': ')[-1][:-1],\n 'motion_adverb': meta_info[9].split(': ')[-1][:-1]})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n\n for i, s in enumerate(self.sequence_list):\n object_class = self.sequence_meta_info[s]['object_class_name']\n if object_class in seq_per_class:\n seq_per_class[object_class].append(i)\n else:\n seq_per_class[object_class] = [i]\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _get_sequence_list(self):\n with open(os.path.join(self.root, 'list.txt')) as f:\n dir_list = list(csv.reader(f))\n dir_list = [dir_name[0] for dir_name in dir_list]\n return dir_list\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n def _read_target_visible(self, seq_path):\n # Read full 
occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, \"absence.label\")\n cover_file = os.path.join(seq_path, \"cover.label\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n with open(cover_file, 'r', newline='') as f:\n cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n\n target_visible = ~occlusion & (cover>0).byte()\n\n visible_ratio = cover.float() / 8\n return target_visible, visible_ratio\n\n def _get_sequence_path(self, seq_id):\n return os.path.join(self.root, self.sequence_list[seq_id])\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible, visible_ratio = self._read_target_visible(seq_path)\n visible = visible & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def get_class_name(self, seq_id):\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n return obj_meta['object_class_name']\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n return frame_list, anno_frames, obj_meta" }, { "identifier": "TrackingNet", "path": "lib/train/dataset/tracking_net.py", "snippet": "class TrackingNet(BaseVideoDataset):\n \"\"\" TrackingNet dataset.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - The path to the TrackingNet folder, containing the training sets.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n sets (0 - 11) will be used.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().trackingnet_dir if root is None else root\n super().__init__('TrackingNet', root, image_loader)\n\n if set_ids is None:\n set_ids = [i for i in range(12)]\n\n self.set_ids = set_ids\n\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n # video_name for each sequence\n self.sequence_list = list_sequences(self.root, self.set_ids)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n # we do not have the class_lists for the tracking net\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def _load_class_info(self):\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n with open(class_map_path, 'r') as f:\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_to_class_map, seq_per_class\n\n def get_name(self):\n return 'trackingnet'\n\n def has_class_info(self):\n return True\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n bb_anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\n low_memory=False).values\n return torch.tensor(gt)\n\n def get_sequence_info(self, seq_id):\n bbox = self._read_bb_anno(seq_id)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = valid.clone().byte()\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, seq_id, frame_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\n return self.image_loader(frame_path)\n\n def _get_class(self, seq_id):\n seq_name = self.sequence_list[seq_id][1]\n return self.seq_to_class_map[seq_name]\n\n def get_class_name(self, seq_id):\n obj_class = self._get_class(seq_id)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n obj_class = self._get_class(seq_id)\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "ImagenetVID", "path": "lib/train/dataset/imagenetvid.py", "snippet": "class ImagenetVID(BaseVideoDataset):\n \"\"\" Imagenet VID dataset.\n\n Publication:\n ImageNet Large Scale Visual Recognition Challenge\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n Aditya Khosla, Michael Bernstein, Alexander C. 
Berg and Li Fei-Fei\n IJCV, 2015\n https://arxiv.org/pdf/1409.0575.pdf\n\n Download the dataset from http://image-net.org/\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n \"\"\"\n args:\n root - path to the imagenet vid dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n min_length - Minimum allowed sequence length.\n max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\n which cover complete image.\n \"\"\"\n root = env_settings().imagenet_dir if root is None else root\n super().__init__(\"imagenetvid\", root, image_loader)\n\n cache_file = os.path.join(root, 'cache.json')\n if os.path.isfile(cache_file):\n # If available, load the pre-processed cache file containing meta-info for each sequence\n with open(cache_file, 'r') as f:\n sequence_list_dict = json.load(f)\n\n self.sequence_list = sequence_list_dict\n else:\n # Else process the imagenet annotations and generate the cache file\n self.sequence_list = self._process_anno(root)\n\n with open(cache_file, 'w') as f:\n json.dump(self.sequence_list, f)\n\n # Filter the sequences based on min_length and max_target_area in the first frame\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n get_target_to_image_ratio(x) < max_target_area]\n\n def get_name(self):\n return 'imagenetvid'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, sequence, frame_id):\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n frame_number = frame_id + sequence['start_frame']\n frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,\n '{:06d}.JPEG'.format(frame_number))\n return self.image_loader(frame_path)\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n sequence = self.sequence_list[seq_id]\n\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n # Create anno dict\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n # added the class info to the meta info\n object_meta = OrderedDict({'object_class': sequence['class_name'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta\n\n def _process_anno(self, root):\n # Builds individual tracklets\n base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train')\n\n all_sequences = []\n for set in sorted(os.listdir(base_vid_anno_path)):\n set_id = int(set.split('_')[-1])\n for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):\n\n vid_id = int(vid.split('_')[-1])\n anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))\n\n frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))\n image_size = [int(frame1_anno.find('size/width').text), 
int(frame1_anno.find('size/height').text)]\n\n objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')\n for f in anno_files]\n\n tracklets = {}\n\n # Find all tracklets along with start frame\n for f_id, all_targets in enumerate(objects):\n for target in all_targets:\n tracklet_id = target.find('trackid').text\n if tracklet_id not in tracklets:\n tracklets[tracklet_id] = f_id\n\n for tracklet_id, tracklet_start in tracklets.items():\n tracklet_anno = []\n target_visible = []\n class_name_id = None\n\n for f_id in range(tracklet_start, len(objects)):\n found = False\n for target in objects[f_id]:\n if target.find('trackid').text == tracklet_id:\n if not class_name_id:\n class_name_id = target.find('name').text\n x1 = int(target.find('bndbox/xmin').text)\n y1 = int(target.find('bndbox/ymin').text)\n x2 = int(target.find('bndbox/xmax').text)\n y2 = int(target.find('bndbox/ymax').text)\n\n tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])\n target_visible.append(target.find('occluded').text == '0')\n\n found = True\n break\n if not found:\n break\n\n new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,\n 'start_frame': tracklet_start, 'anno': tracklet_anno,\n 'target_visible': target_visible, 'image_size': image_size}\n all_sequences.append(new_sequence)\n\n return all_sequences" }, { "identifier": "MSCOCOSeq", "path": "lib/train/dataset/coco_seq.py", "snippet": "class MSCOCOSeq(BaseVideoDataset):\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n Publication:\n Microsoft COCO: Common Objects in Context.\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n ECCV, 2014\n https://arxiv.org/pdf/1405.0312.pdf\n\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n organized as follows.\n - coco_root\n - annotations\n - instances_train2014.json\n - instances_train2017.json\n - images\n - train2014\n - train2017\n\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n images will be used\n split - 'train' or 'val'.\n version - version of coco dataset (2014 or 2017)\n \"\"\"\n root = env_settings().coco_dir if root is None else root\n super().__init__('COCO', root, image_loader)\n\n self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n # Load the COCO set.\n self.coco_set = COCO(self.anno_path)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'coco'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''2021.1.3 To avoid too small bounding boxes. Here we change the threshold to 50 pixels'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n img = self.image_loader(os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # COCO is an image dataset. 
Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "Got10k_lmdb", "path": "lib/train/dataset/got10k_lmdb.py", "snippet": "class Got10k_lmdb(BaseVideoDataset):\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n not NOT the official got-10k validation split. To use the official validation split, provide that as\n the root folder instead.\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n options can be used at the same time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n use_lmdb - whether the dataset is stored in lmdb format\n \"\"\"\n root = env_settings().got10k_lmdb_dir if root is None else root\n super().__init__('GOT10k_lmdb', root, image_loader)\n\n # all folders inside the root\n self.sequence_list = self._get_sequence_list()\n\n # seq_id is the index of the folder inside the got10k root path\n if split is not None:\n if seq_ids is not None:\n raise ValueError('Cannot set both split_name and seq_ids.')\n train_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_split.txt')\n elif split == 'val':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_val_split.txt')\n elif split == 'train_full':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_full_split.txt')\n elif split == 'vottrain':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_train_split.txt')\n elif split == 'votval':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_val_split.txt')\n else:\n raise ValueError('Unknown split name.')\n seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n elif seq_ids is None:\n seq_ids = list(range(0, len(self.sequence_list)))\n\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.sequence_meta_info = self._load_meta_info()\n self.seq_per_class = self._build_seq_per_class()\n\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def get_name(self):\n return 'got10k_lmdb'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def _load_meta_info(self):\n def _read_meta(meta_info):\n\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1],\n 'motion_class': meta_info[6].split(': ')[-1],\n 'major_class': meta_info[7].split(': ')[-1],\n 'root_class': 
meta_info[8].split(': ')[-1],\n 'motion_adverb': meta_info[9].split(': ')[-1]})\n\n return object_meta\n sequence_meta_info = {}\n for s in self.sequence_list:\n try:\n meta_str = decode_str(self.root, \"train/%s/meta_info.ini\" %s)\n sequence_meta_info[s] = _read_meta(meta_str.split('\\n'))\n except:\n sequence_meta_info[s] = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return sequence_meta_info\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n\n for i, s in enumerate(self.sequence_list):\n object_class = self.sequence_meta_info[s]['object_class_name']\n if object_class in seq_per_class:\n seq_per_class[object_class].append(i)\n else:\n seq_per_class[object_class] = [i]\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _get_sequence_list(self):\n dir_str = decode_str(self.root, 'train/list.txt')\n dir_list = dir_str.split('\\n')\n return dir_list\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line in got10k is empty\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n\n return torch.tensor(gt_arr)\n\n def _read_target_visible(self, seq_path):\n # full occlusion and out_of_view files\n occlusion_file = os.path.join(seq_path, \"absence.label\")\n cover_file = os.path.join(seq_path, \"cover.label\")\n # Read these files\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split('\\n')[:-1])) # the last line in got10k is empty\n occlusion = torch.ByteTensor(occ_list)\n cover_list = list(map(int, decode_str(self.root, cover_file).split('\\n')[:-1])) # the last line in got10k is empty\n cover = torch.ByteTensor(cover_list)\n\n target_visible = ~occlusion & (cover>0).byte()\n\n visible_ratio = cover.float() / 8\n return target_visible, visible_ratio\n\n def _get_sequence_path(self, seq_id):\n return os.path.join(\"train\", self.sequence_list[seq_id])\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible, visible_ratio = self._read_target_visible(seq_path)\n visible = visible & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n def get_class_name(self, seq_id):\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n return obj_meta['object_class_name']\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n return frame_list, anno_frames, obj_meta" }, { "identifier": "Lasot_lmdb", "path": "lib/train/dataset/lasot_lmdb.py", "snippet": "class Lasot_lmdb(BaseVideoDataset):\n\n def __init__(self, 
root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_lmdb_dir if root is None else root\n super().__init__('LaSOT_lmdb', root, image_loader)\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list]\n self.class_list = []\n for ele in class_list:\n if ele not in self.class_list:\n self.class_list.append(ele)\n # Keep a list of all classes\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot_lmdb'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line is empty\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n return torch.tensor(gt_arr)\n\n def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split(',')))\n occlusion = torch.ByteTensor(occ_list)\n out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(',')))\n out_of_view = torch.ByteTensor(out_view_list)\n\n 
target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "ImagenetVID_lmdb", "path": "lib/train/dataset/imagenetvid_lmdb.py", "snippet": "class ImagenetVID_lmdb(BaseVideoDataset):\n \"\"\" Imagenet VID dataset.\n\n Publication:\n ImageNet Large Scale Visual Recognition Challenge\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\n IJCV, 2015\n https://arxiv.org/pdf/1409.0575.pdf\n\n Download the dataset from http://image-net.org/\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n \"\"\"\n args:\n root - path to the imagenet vid dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n min_length - Minimum allowed sequence length.\n max_target_area - max allowed ratio between target area and image area. 
Can be used to filter out targets\n which cover complete image.\n \"\"\"\n root = env_settings().imagenet_dir if root is None else root\n super().__init__(\"imagenetvid_lmdb\", root, image_loader)\n\n sequence_list_dict = decode_json(root, \"cache.json\")\n self.sequence_list = sequence_list_dict\n\n # Filter the sequences based on min_length and max_target_area in the first frame\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n get_target_to_image_ratio(x) < max_target_area]\n\n def get_name(self):\n return 'imagenetvid_lmdb'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, sequence, frame_id):\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n frame_number = frame_id + sequence['start_frame']\n frame_path = os.path.join('Data', 'VID', 'train', set_name, vid_name,\n '{:06d}.JPEG'.format(frame_number))\n return decode_img(self.root, frame_path)\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n sequence = self.sequence_list[seq_id]\n\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n # Create anno dict\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n # added the class info to the meta info\n object_meta = OrderedDict({'object_class': sequence['class_name'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "MSCOCOSeq_lmdb", "path": "lib/train/dataset/coco_seq_lmdb.py", "snippet": "class MSCOCOSeq_lmdb(BaseVideoDataset):\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n Publication:\n Microsoft COCO: Common Objects in Context.\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n ECCV, 2014\n https://arxiv.org/pdf/1405.0312.pdf\n\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n organized as follows.\n - coco_root\n - annotations\n - instances_train2014.json\n - instances_train2017.json\n - images\n - train2014\n - train2017\n\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n images will be used\n split - 'train' or 'val'.\n version - version of coco dataset (2014 or 2017)\n \"\"\"\n root = env_settings().coco_dir if root is None else root\n super().__init__('COCO_lmdb', root, image_loader)\n self.root = root\n self.img_pth = 'images/{}{}/'.format(split, version)\n self.anno_path = 'annotations/instances_{}{}.json'.format(split, version)\n\n # Load the COCO set.\n print('loading annotations into memory...')\n tic = time.time()\n coco_json = decode_json(root, self.anno_path)\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\n\n self.coco_set = COCO(coco_json)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'coco_lmdb'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n # img = self.image_loader(os.path.join(self.img_pth, path))\n img = decode_img(self.root, os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "TrackingNet_lmdb", "path": "lib/train/dataset/tracking_net_lmdb.py", "snippet": "class TrackingNet_lmdb(BaseVideoDataset):\n \"\"\" TrackingNet dataset.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - The path to the TrackingNet folder, containing the training sets.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n sets (0 - 11) will be used.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().trackingnet_lmdb_dir if root is None else root\n super().__init__('TrackingNet_lmdb', root, image_loader)\n\n if set_ids is None:\n set_ids = [i for i in range(12)]\n\n self.set_ids = set_ids\n\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n # video_name for each sequence\n self.sequence_list = list_sequences(self.root)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n # we do not have the class_lists for the tracking net\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def _load_class_info(self):\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n with open(class_map_path, 'r') as f:\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_to_class_map, seq_per_class\n\n def get_name(self):\n return 'trackingnet_lmdb'\n\n def has_class_info(self):\n return True\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n gt_str_list = decode_str(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n os.path.join(\"anno\", vid_name + \".txt\")).split('\\n')[:-1]\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n return torch.tensor(gt_arr)\n\n def get_sequence_info(self, seq_id):\n bbox = self._read_bb_anno(seq_id)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = valid.clone().byte()\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, seq_id, frame_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n return decode_img(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n os.path.join(\"frames\", vid_name, str(frame_id) + \".jpg\"))\n\n def _get_class(self, seq_id):\n seq_name = self.sequence_list[seq_id][1]\n return self.seq_to_class_map[seq_name]\n\n def get_class_name(self, seq_id):\n obj_class = self._get_class(seq_id)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n obj_class = self._get_class(seq_id)\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "sampler", "path": "lib/train/data/sampler.py", "snippet": "def no_processing(data):\n def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\n train_cls=False, pos_prob=0.5):\n def __len__(self):\n def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None,\n allow_invisible=False, force_invisible=False):\n def __getitem__(self, index):\n def getitem(self):\n def getitem_cls(self):\n def get_center_box(self, H, W, 
ratio=1/8):\n def sample_seq_from_dataset(self, dataset, is_video_dataset):\n def get_one_search(self):\n def get_frame_ids_trident(self, visible):\n def get_frame_ids_stark(self, visible, valid):\nclass TrackingSampler(torch.utils.data.Dataset):\n H, W, _ = template_frames[0].shape\n H, W, _ = template_frames[0].shape\n H, W, _ = search_frames[0].shape" }, { "identifier": "processing", "path": "lib/train/data/processing.py", "snippet": "def stack_tensors(x):\n def __init__(self, transform=transforms.ToTensor(), template_transform=None, search_transform=None, joint_transform=None):\n def __call__(self, data: TensorDict):\n def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,\n mode='pair', settings=None, *args, **kwargs):\n def _get_jittered_box(self, box, mode):\n def __call__(self, data: TensorDict):\nclass BaseProcessing:\nclass STARKProcessing(BaseProcessing):" }, { "identifier": "LTRLoader", "path": "lib/train/data/loader.py", "snippet": "class LTRLoader(torch.utils.data.dataloader.DataLoader):\n \"\"\"\n Data loader. Combines a dataset and a sampler, and provides\n single- or multi-process iterators over the dataset.\n\n Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to\n select along which dimension the data should be stacked to form a batch.\n\n Arguments:\n dataset (Dataset): dataset from which to load the data.\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: False).\n sampler (Sampler, optional): defines the strategy to draw samples from\n the dataset. If specified, ``shuffle`` must be False.\n batch_sampler (Sampler, optional): like sampler, but returns a batch of\n indices at a time. Mutually exclusive with batch_size, shuffle,\n sampler, and drop_last.\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process.\n (default: 0)\n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n stack_dim (int): Dimension along which to stack to form the batch. (default: 0)\n pin_memory (bool, optional): If ``True``, the data loader will copy tensors\n into CUDA pinned memory before returning them.\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If ``False`` and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. (default: False)\n timeout (numeric, optional): if positive, the timeout value for collecting a batch\n from workers. Should always be non-negative. (default: 0)\n worker_init_fn (callable, optional): If not None, this will be called on each\n worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\n input, after seeding and before data loading. (default: None)\n\n .. note:: By default, each worker will have its PyTorch seed set to\n ``base_seed + worker_id``, where ``base_seed`` is a long generated\n by main process using its RNG. However, seeds for other libraries\n may be duplicated upon initializing workers (w.g., NumPy), causing\n each worker to return identical random numbers. (See\n :ref:`dataloader-workers-random-seed` section in FAQ.) 
You may\n use ``torch.initial_seed()`` to access the PyTorch seed for each\n worker in :attr:`worker_init_fn`, and use it to set other seeds\n before data loading.\n\n .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\n unpicklable object, e.g., a lambda function.\n \"\"\"\n\n __initialized = False\n\n def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None):\n if collate_fn is None:\n if stack_dim == 0:\n collate_fn = ltr_collate\n elif stack_dim == 1:\n collate_fn = ltr_collate_stack1\n else:\n raise ValueError('Stack dim no supported. Must be 0 or 1.')\n\n super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n num_workers, collate_fn, pin_memory, drop_last,\n timeout, worker_init_fn)\n\n self.name = name\n self.training = training\n self.epoch_interval = epoch_interval\n self.stack_dim = stack_dim" }, { "identifier": "opencv_loader", "path": "lib/train/data/image_loader.py", "snippet": "def opencv_loader(path):\n \"\"\" Read image using opencv's imread function and returns it in rgb format\"\"\"\n try:\n im = cv.imread(path, cv.IMREAD_COLOR)\n\n # convert to rgb and return\n return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n except Exception as e:\n print('ERROR: Could not read image \"{}\"'.format(path))\n print(e)\n return None" }, { "identifier": "is_main_process", "path": "lib/utils/misc.py", "snippet": "def is_main_process():\n return get_rank() == 0" } ]
import os
import torch
import lib.train.data.transforms as tfm
from torch.utils.data.distributed import DistributedSampler
from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet
from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb
from lib.train.data import sampler, opencv_loader, processing, LTRLoader
from lib.utils.misc import is_main_process
18,154
# datasets related
def update_settings(settings, cfg):
    settings.print_interval = cfg.TRAIN.PRINT_INTERVAL
    settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR,
                                   'search': cfg.DATA.SEARCH.FACTOR}
    settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE,
                          'search': cfg.DATA.SEARCH.SIZE}
    settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER,
                                     'search': cfg.DATA.SEARCH.CENTER_JITTER}
    settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER,
                                    'search': cfg.DATA.SEARCH.SCALE_JITTER}
    settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM
    settings.print_stats = None
    settings.batchsize = cfg.TRAIN.BATCH_SIZE
    settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE


def names2datasets(name_list: list, settings, image_loader):
    assert isinstance(name_list, list)
    datasets = []
    for name in name_list:
        assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val",
                        "COCO17", "VID", "TRACKINGNET", ]
        # Tracking Task
        if name == "LASOT":
            if settings.use_lmdb:
                print("Building lasot dataset from lmdb")
                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))
            else:
                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))
        if name == "GOT10K_vottrain":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))
        if name == "GOT10K_train_full":
            if settings.use_lmdb:
                print("Building got10k_train_full from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader))
        if name == "GOT10K_votval":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader))
        if name == "GOT10K_official_val":
            if settings.use_lmdb:
                raise ValueError("Not implement")
            else:
                datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader))
        if name == "COCO17":
            if settings.use_lmdb:
                print("Building COCO2017 from lmdb")
                datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader))
            else:
                datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader))
        if name == "VID":
            if settings.use_lmdb:
                print("Building VID from lmdb")
                datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader))
            else:
# datasets related
def update_settings(settings, cfg):
    settings.print_interval = cfg.TRAIN.PRINT_INTERVAL
    settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR,
                                   'search': cfg.DATA.SEARCH.FACTOR}
    settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE,
                          'search': cfg.DATA.SEARCH.SIZE}
    settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER,
                                     'search': cfg.DATA.SEARCH.CENTER_JITTER}
    settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER,
                                    'search': cfg.DATA.SEARCH.SCALE_JITTER}
    settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM
    settings.print_stats = None
    settings.batchsize = cfg.TRAIN.BATCH_SIZE
    settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE


def names2datasets(name_list: list, settings, image_loader):
    assert isinstance(name_list, list)
    datasets = []
    for name in name_list:
        assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val",
                        "COCO17", "VID", "TRACKINGNET", ]
        # Tracking Task
        if name == "LASOT":
            if settings.use_lmdb:
                print("Building lasot dataset from lmdb")
                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))
            else:
                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))
        if name == "GOT10K_vottrain":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))
        if name == "GOT10K_train_full":
            if settings.use_lmdb:
                print("Building got10k_train_full from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader))
        if name == "GOT10K_votval":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader))
        if name == "GOT10K_official_val":
            if settings.use_lmdb:
                raise ValueError("Not implement")
            else:
                datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader))
        if name == "COCO17":
            if settings.use_lmdb:
                print("Building COCO2017 from lmdb")
                datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader))
            else:
                datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader))
        if name == "VID":
            if settings.use_lmdb:
                print("Building VID from lmdb")
                datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader))
            else:
datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader))
3
2023-12-10 03:57:19+00:00
24k
lumina-test/lumina
lumina/e2e_test/test_gbn.py
[ { "identifier": "get_qp_info_list", "path": "lumina/analyzer/main.py", "snippet": "def get_qp_info_list(switch_msg_snapshot):\n \"\"\" Get the list of QP info from the switch message snapshot\n\n Args:\n switch_msg_snapshot (str): The path to the switch message snapshot\n\n Returns:\n list of dict: The list of queue pair (QP) information if successful or None otherwise.\n The list of QP information is in the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n \"\"\"\n try:\n with open(switch_msg_snapshot, 'r') as stream:\n qp_info_list = yaml.safe_load(stream)\n except:\n logging.error(\"Read switch message snapshot %s error.\" % switch_msg_snapshot)\n return None\n\n logging.info(\"Read switch message snapshot %s.\" % switch_msg_snapshot)\n return qp_info_list" }, { "identifier": "Orchestrator", "path": "lumina/orchestrator/main.py", "snippet": "class Orchestrator:\n \"\"\" Class to manage the experiment \"\"\"\n def __init__(self, config_file):\n \"\"\" Constructor for Orchestrator class\n\n Args:\n config_file (str): path to the yaml (config) file.\n The file contains configs for switch, requester, responder, traffic, etc.\n\n Returns:\n N/A\n \"\"\"\n with open(config_file, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n local_workspace = conf['local-workspace']\n result_path = conf['result-path']\n switch_conf = conf['switch']\n requester_conf = conf['requester']\n responder_conf = conf['responder']\n requester_mirror_conf = conf['requester-mirror']\n responder_mirror_conf = conf['responder-mirror']\n traffic_conf = conf['traffic']\n rewrite_udp_dst_port = conf['rewrite-udp-dst-port']\n num_repeats = conf['num-repeats']\n agg_pcap_filename = conf['aggregate-pcap-filename']\n except KeyError as e:\n print(\"Config file %s has a bad yaml format (key error: %s)\" % (config_file, e))\n sys.exit(-1)\n\n switch_conf['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n requester_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n responder_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n\n self.local_workspace = local_workspace\n self.result_path = result_path\n self.traffic_conf = traffic_conf\n self.num_repeats = num_repeats\n self.switch = switch.Switch(switch_conf)\n self.requester = host.RDMAHost(requester_conf)\n self.responder = host.RDMAHost(responder_conf)\n self.requester_mirror = host.MirrorHost(requester_mirror_conf)\n self.responder_mirror = host.MirrorHost(responder_mirror_conf)\n self.aggregate_pcap_filename = agg_pcap_filename\n\n cmd = \"mkdir -p %s\" % self.result_path\n subprocess.call(cmd, shell = True)\n\n def rm_old_files(self):\n \"\"\" Remove result files left by previous experiments \"\"\"\n old_iter_id = 0\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n while os.path.exists(old_iter_result_path) and not os.path.isfile(old_iter_result_path):\n cmd = \"rm -rf %s\" % (old_iter_result_path)\n subprocess.call(cmd, shell=True)\n\n old_iter_id += 1\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n def get_requester_ip_list(self):\n \"\"\" Return the list of requester IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.requester.conf['nic']['ip-list']]\n\n def get_responder_ip_list(self):\n \"\"\" Return the 
list of responder IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.responder.conf['nic']['ip-list']]\n\n def get_num_repeats(self):\n \"\"\" Return the number of experiment repeats \"\"\"\n return self.num_repeats\n\n def sync_and_compile(self):\n \"\"\" Syncronize and compile the code on all the hosts\n\n Returns:\n bool: True if the code is synced and compiled successfully, False otherwise\n \"\"\"\n logging.info(\"Sync and compile the code\")\n\n ## Sync and compile the switch code\n ret = self.switch.sync_and_compile(self.local_workspace,\n switch.SWITCH_PROG_DIR_NAME,\n switch.SWITCH_PROG_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the switch code\")\n return False\n\n ## Sync and compile the traffic generator code\n rdma_verb = self.traffic_conf['rdma-verb'].strip().lower()\n if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER:\n logging.error(\"Invalid RDMA verb: %s\" % rdma_verb)\n return False\n\n ret = self.requester.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_client_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on requester\")\n return False\n\n ret = self.responder.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_server_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on responder\")\n return False\n\n ret = self.requester.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on requester\")\n return False\n\n ret = self.responder.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on responder\")\n return False\n\n ## Sync and compile the packet capture code\n ret = self.requester_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on requester_mirror\")\n return False\n\n ret = self.responder_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on responder_mirror\")\n return False\n\n return True\n\n def generate_switch_table_config(self):\n \"\"\" Generate the switch configuration, including:\n 1. Forward table\n 2. Mirror table\n 3. ARP table\n 4. 
Traffic table, including the events to inject\n\n Returns:\n bool: True if the switch configuration is generated successfully, False otherwise\n \"\"\"\n requester_nic_conf = self.requester.conf['nic']\n responder_nic_conf = self.responder.conf['nic']\n requester_mirror_nic_conf = self.requester_mirror.conf['nic']\n responder_mirror_nic_conf = self.responder_mirror.conf['nic']\n\n ## Set up forward table entries\n self.switch.conf['forward-table'] = []\n try:\n for nic_conf, host_type in zip([requester_nic_conf, responder_nic_conf, \\\n requester_mirror_nic_conf, responder_mirror_nic_conf],\n ['requester', 'responder', 'requester_mirror', 'responder_mirror']):\n forward_table_entry = {'dst-mac': nic_conf['mac'],\n 'eg-port': nic_conf['switch-port'],\n 'host': host_type}\n self.switch.conf['forward-table'].append(forward_table_entry)\n except:\n logging.error(\"Failed to set forward table\")\n return False\n\n ## Set up mirror table entries, use ingress_to_egress\n try:\n requester_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': requester_nic_conf['switch-port'],\n 'dst-port': requester_mirror_nic_conf['switch-port']}\n\n responder_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': responder_nic_conf['switch-port'],\n 'dst-port': responder_mirror_nic_conf['switch-port']}\n self.switch.conf['mirror-table'] = [requester_mirror_entry, responder_mirror_entry]\n except:\n logging.error(\"Failed to set mirror table\")\n return False\n\n requester_mac = requester_nic_conf['mac']\n responder_mac = responder_nic_conf['mac']\n requester_ip_list = requester_nic_conf['ip-list']\n responder_ip_list = responder_nic_conf['ip-list']\n ## Set up arp table entries\n arp_entries = []\n try:\n for dst_ip_list, dst_mac in zip([requester_ip_list, responder_ip_list],\n [requester_mac, responder_mac]):\n for dst_ip_subnet in dst_ip_list:\n dst_ip = dst_ip_subnet.split('/')[0]\n arp_entries.append({'dst-ip': dst_ip, 'dst-mac': dst_mac})\n self.switch.conf['arp-table'] = arp_entries\n except:\n logging.error(\"Failed to set ARP table\")\n return False\n\n ## Generate the events of each iteration for switch config\n per_iter_event_list = self.traffic_conf['data-pkt-events']\n msg_size = self.traffic_conf['message-size']\n mtu = self.traffic_conf['mtu']\n num_msgs_per_qp = self.traffic_conf['num-msgs-per-qp']\n num_pkts_per_msg = int(math.ceil(msg_size / mtu))\n self.switch.conf['traffic'] = {}\n self.switch.conf['traffic']['num-msgs-per-qp'] = num_msgs_per_qp\n self.switch.conf['traffic']['num-pkts-per-msg'] = num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'] = []\n\n if per_iter_event_list is None or len(per_iter_event_list) == 0:\n ## No events at all\n return True\n\n for i in range(num_msgs_per_qp):\n for per_iter_event in per_iter_event_list:\n global_event = copy.deepcopy(per_iter_event)\n\n ## This event is applied to all the packets of the message. 
We need to expand it!\n if str(global_event['psn']).lower() == 'all':\n for psn in range(num_pkts_per_msg):\n global_event['psn'] = psn + i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n else:\n global_event['psn'] += i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n\n return True\n\n def ping_mesh(self):\n \"\"\" Ping all the IP addresses between requester and responder to check the connectivity\n\n Returns:\n bool: True if all the IP addresses can be pinged successfully, False otherwise\n \"\"\"\n for requester_ip_subnet in self.requester.conf['nic']['ip-list']:\n requester_ip = requester_ip_subnet.split('/')[0]\n command = \"ping \" + requester_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.responder.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + requester_ip)\n logging.error(\"[Command return info]: %s %s\" % (', '.join(ret_val), ', '.join(err_info)))\n return False\n\n for responder_ip_subnet in self.responder.conf['nic']['ip-list']:\n responder_ip = responder_ip_subnet.split('/')[0]\n command = \"ping \" + responder_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.requester.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + responder_ip)\n logging.error(\"[Command return info]: %s %s\" % (ret_val, err_info))\n return False\n\n logging.info(\"Successfully pinged all the IP addresses between requester and responder\")\n return True\n\n def generate_switch_config_file(self):\n \"\"\" Generate the switch configuration file and copy it to the switch\n\n Returns:\n bool: True if the switch configuration file is generated and copied successfully, False otherwise\n \"\"\"\n ## Get the mac address for all the hosts\n self.requester.get_mac_address()\n self.responder.get_mac_address()\n self.requester_mirror.get_mac_address()\n self.responder_mirror.get_mac_address()\n\n ## Generate config for Match-Action table in switch\n if self.generate_switch_table_config() == False:\n logging.error(\"Failed to generate switch table configuration\")\n return False\n\n ## Dump the switch configuration into a file, and copy it to the switch\n if self.switch.dump_controller_config(self.local_workspace) == False:\n logging.error(\"Failed to dump switch config\")\n return False\n\n return True\n\n def __is_valid_traffc(self):\n \"\"\" Check if the traffic configuration is valid, including:\n 1. The tx-depth should be 1 or > 1\n 2. 
If tx-depth > 1, then we can only inject ECN marking events\n\n Returns:\n bool: True if the traffic configuration is valid, False otherwise\n \"\"\"\n try:\n data_pkt_events = self.traffic_conf['data-pkt-events']\n tx_depth = self.traffic_conf['tx-depth']\n\n if tx_depth == 1:\n return True\n elif tx_depth <= 0:\n return False\n\n for event in data_pkt_events:\n if event['type'] != 'ecn':\n logging.error(\"Cannot inject %s event when tx depth = %d\" % (event['type'], tx_depth))\n return False\n except:\n logging.error(\"Failed to parse traffic configuration\")\n return False\n\n return True\n\n def run_experiment(self):\n \"\"\" Run the experiment\n\n Returns:\n bool: True if the experiment is completed successfully, False otherwise\n \"\"\"\n\n ## Check if traffic configuration is valid\n if self.__is_valid_traffc() == False:\n logging.error(\"Invalid traffic configuration\")\n return False\n\n ## Run switch program\n if self.switch.run_switch() == False:\n logging.error(\"Failed to run switch\")\n return False\n\n ## Sleep for 1 second to make sure control plane is listenning (for client message)\n time.sleep(1)\n\n ## Configure the servers\n if self.requester.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA requester\")\n return False\n\n if self.responder.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA responder\")\n return False\n\n if self.requester_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on responder mirror\")\n return False\n\n ## Check the connectivity through pingmesh (try 5 rounds)\n num_tries = 0\n pingmesh_ret = False\n\n while num_tries < 5:\n pingmesh_ret = self.ping_mesh()\n if pingmesh_ret == True:\n break\n num_tries += 1\n time.sleep(1)\n\n if pingmesh_ret == False:\n logging.error(\"Failed to ping all the IP addresses between requester and responder\")\n return False\n\n ## Launch packet capture for both side\n ## Prerequisite: config hugepage and igb_uio if needed\n if self.requester_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on responder mirror\")\n return False\n\n time.sleep(3)\n\n ## Dump the counters before running\n if self.requester.dump_counters(host.REQ_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester before running\")\n return False\n\n if self.responder.dump_counters(host.RSP_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder before running\")\n return False\n\n ## Launch RDMA server first\n run_server_ret = self.responder.run_traffic_gen_server(self.traffic_conf)\n if run_server_ret == False:\n logging.error(\"Failed to run RDMA server\")\n return False\n\n time.sleep(2)\n\n ## Launch RDMA client\n try:\n destination_ip_subnet = self.responder.conf['nic']['ip-list'][0]\n destination_ip = destination_ip_subnet.split('/')[0]\n except:\n logging.error(\"Failed to get destination IP\")\n return False\n\n run_client_ret = self.requester.run_traffic_gen_client(traffic_conf=self.traffic_conf,\n destination_ip=destination_ip,\n controller_ip=self.switch.conf['control-ip'],\n controller_listen_port=self.switch.conf['listen-port'])\n if run_client_ret == 
False:\n logging.error(\"Failed to run RDMA client\")\n return False\n\n if self.switch.dump_results() == False:\n logging.error(\"Failed to dump results from switch\")\n return False\n\n if self.requester.dump_counters(host.REQ_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester after running\")\n return False\n\n if self.responder.dump_counters(host.RSP_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder after running\")\n return False\n\n logging.info(\"Experiment completed successfully\")\n return True\n\n def clean_up(self):\n \"\"\" Clean up the environment after the experiment\n\n Returns:\n bool: True if the clean up is completed successfully, False otherwise\n \"\"\"\n logging.info(\"Start cleaning up the environment\")\n\n if self.switch.clean_up() == False:\n logging.error(\"Failed to clean up switch\")\n return False\n\n if self.requester.clean_up() == False:\n logging.error(\"Failed to clean up requester\")\n return False\n\n if self.responder.clean_up() == False:\n logging.error(\"Failed to clean up responder\")\n return False\n\n if self.requester_mirror.clean_up() == False:\n logging.error(\"Failed to clean up requester mirror\")\n return False\n\n if self.responder_mirror.clean_up() == False:\n logging.error(\"Failed to clean up responder mirror\")\n return False\n\n return True\n\n def fetch_results(self, iter_id=0):\n \"\"\" Fetch the results of iteration 'iter_id', including:\n 1. Switch table entries and counters\n 2. Packet trace (pcap file)\n 3. Configs and end-to-end results from RDMA hosts\n\n Args:\n iter_id (int, optional): iteration ID, defaults to 0\n\n Returns:\n bool: True if the result collection is completed successfully, False otherwise\n \"\"\"\n ## Make the results dir if it does not exist\n iter_result_path = os.path.join(self.result_path, str(iter_id))\n cmd = \"mkdir -p %s\" % iter_result_path\n try:\n subprocess.call(cmd, shell=True)\n except:\n logging.error(\"Failed to create result directory %s\" % iter_result_path)\n return False\n\n if self.switch.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from switch\")\n return False\n\n if self.requester_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester mirror\")\n return False\n\n if self.responder_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder mirror\")\n return False\n\n if self.requester.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester\")\n return False\n\n if self.responder.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder\")\n return False\n\n logging.info(\"Finished fetching results for iteration %d\" % iter_id)\n return True\n\n def merge_traces(self, iter_id=0):\n iter_pcap_dir_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR)\n src_pcap_file_list = [os.path.join(iter_pcap_dir_path,\n self.requester_mirror.conf['pkt-dump-conf']['dump-filename']),\n os.path.join(iter_pcap_dir_path,\n self.responder_mirror.conf['pkt-dump-conf']['dump-filename'])]\n target_pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = pcap_process.merge_pcaps(src_pcap_file_list)\n if packet_list is None:\n logging.error(\"Failed to merge pcap files for iteration %d\" % iter_id)\n return False\n\n if 
pcap_process.dump_pkts_to_pcap(target_pcap_path, packet_list) == False:\n logging.error(\"Failed to dump packets to pcap file %s\" % target_pcap_path)\n return False\n\n logging.info(\"Successfully merged pcap files for iteration %d\" % iter_id)\n\n def check_integrity(self, iter_id=0):\n ## Check if the collected packet trace passes integrity check\n pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = get_packet_list(pcap_path)\n packet_list.sort(key=lambda x:x.get_switch_seqnum())\n logging.info(\"Packet trace sorted by switch sequence number.\")\n\n switch_state_snapshot = os.path.join(self.result_path,\n str(iter_id),\n switch.SWITCH_RESULT_DIR,\n switch.SWITCH_STATE_SNAPSHOT)\n port_map = {'requester': self.requester.conf['nic']['switch-port'],\n 'responder': self.responder.conf['nic']['switch-port'],\n 'requester-mirror': self.requester_mirror.conf['nic']['switch-port'],\n 'responder-mirror': self.responder_mirror.conf['nic']['switch-port']}\n switch_counter = SwitchCounter(switch_state_snapshot, port_map)\n\n integrity_checker = IntegrityCheck(packet_list=packet_list,\n switch_counter=switch_counter,\n requester_ip_list=self.get_requester_ip_list(),\n responder_ip_list=self.get_responder_ip_list())\n\n if integrity_checker.check() == True:\n logging.info(\"Integrity check passed\")\n return True\n else:\n logging.info(\"Integrity check failed\")\n return False" }, { "identifier": "SwitchCounter", "path": "lumina/analyzer/counter/switch_counter.py", "snippet": "class SwitchCounter:\n \"\"\" Class to parse switch counter files\n\n Attributes:\n _counter (dict of dict): the switch counters with the following format:\n {'requester': {'ingress': counter_value, 'egress': counter_value},\n 'responder': {'ingress': counter_value, 'egress': counter_value},\n 'requester-mirror': {'ingress': counter_value, 'egress': counter_value},\n 'responder-mirror': {'ingress': counter_value, 'egress': counter_value}}\n \"\"\"\n def __init__(self, snapshot_filename, port_map):\n \"\"\" Constructor\n\n Args:\n snapshot_filename (str): the file where switch dumps its counters\n port_map (dict): the mapping between port name and port number\n\n Returns:\n N/A\n \"\"\"\n with open(snapshot_filename, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n ingress_counters = conf['counter']['ingress']\n egress_counters = conf['counter']['egress']\n except:\n print(\"Bad yaml format in %s\" % snapshot_filename)\n sys.exit(-1)\n\n requester_port = port_map['requester']\n responder_port = port_map['responder']\n requester_mirror_port = port_map['requester-mirror']\n responder_mirror_port = port_map['responder-mirror']\n\n self._counter = {'requester' : {'ingress':0, 'egress': 0},\n 'responder' : {'ingress':0, 'egress': 0},\n 'requester-mirror' : {'ingress':0, 'egress': 0},\n 'responder-mirror' : {'ingress':0, 'egress': 0}}\n try:\n self._counter['requester']['ingress'] = ingress_counters[requester_port]\n self._counter['responder']['ingress'] = ingress_counters[responder_port]\n self._counter['requester-mirror']['ingress'] = ingress_counters[requester_mirror_port]\n self._counter['responder-mirror']['ingress'] = ingress_counters[responder_mirror_port]\n\n self._counter['requester']['egress'] = egress_counters[requester_port]\n self._counter['responder']['egress'] = egress_counters[responder_port]\n self._counter['requester-mirror']['egress'] = egress_counters[requester_mirror_port]\n self._counter['responder-mirror']['egress'] = 
egress_counters[responder_mirror_port]\n\n except:\n print(\"Port number not exist in the switch snapshot\")\n sys.exit(-1)\n\n def get_counter(self):\n \"\"\" Return the switch counters (dict of dict) \"\"\"\n return self._counter" }, { "identifier": "MLNXHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class MLNXHostCounter(HostCounter):\n \"\"\" Class to parse MLNX host counter files \"\"\"\n def __init__(self, counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_port_rcv_packets(self):\n \"\"\" Return the number of received packets \"\"\"\n return self._counter['port-counters']['port_rcv_packets']\n\n def get_port_xmit_packets(self):\n \"\"\" Return the number of transmitted packets \"\"\"\n return self._counter['port-counters']['port_xmit_packets']\n\n def get_num_packet_seq_err(self):\n \"\"\" Return the number of received NAK sequence error packets \"\"\"\n return self._counter['hw-counters']['packet_seq_err']\n\n def get_num_out_of_sequence(self):\n \"\"\" Return the number of out-of-sequence packets received \"\"\"\n return self._counter['hw-counters']['out_of_sequence']\n\n def get_num_dup_requests(self):\n \"\"\" Return the number of duplicate requests \"\"\"\n return self._counter['hw-counters']['duplicate_request']\n\n def implied_nak_seq_err(self):\n \"\"\" Return the number of READ requests implying sequence errors \"\"\"\n return self._counter['hw-counters']['implied_nak_seq_err']\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['np_cnp_sent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['np_ecn_marked_roce_packets']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['rp_cnp_handled']\n\n def get_num_icrc_errors(self):\n \"\"\" Return the number of RoCE packets with ICRC errors received \"\"\"\n return self._counter['hw-counters']['rx_icrc_encapsulated']\n\n def get_num_timeout_err(self):\n \"\"\" Return the number of times QP's ack timer expired for RC, XRC, DCT QPs at the sender side \"\"\"\n return self._counter['hw-counters']['local_ack_timeout_err']\n\n def get_num_discards_dict_tx(self):\n \"\"\" Return the number of TX discarded packets (dict)\"\"\"\n discards_dict_tx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'tx' in x:\n discards_dict_tx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_tx\n\n def get_num_discards_dict_rx(self):\n \"\"\" Return the number of RX discarded packets (dict) \"\"\"\n discards_dict_rx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'rx' in x:\n discards_dict_rx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_rx" }, { "identifier": "IntelHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class IntelHostCounter(HostCounter):\n \"\"\" Class to parse Intel host counter files \"\"\"\n def __init__(self, 
counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['cnpSent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['RxECNMrkd']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['cnpHandled']\n\n def get_num_discards_dict(self):\n \"\"\" Return the number of discarded packets (dict) \"\"\"\n discards_dict= {}\n for x in self._counter['hw-counters'].keys():\n if 'discard' in x:\n discards_dict[x] = self._counter['hw-counters'][x]\n return discards_dict" }, { "identifier": "get_packet_list", "path": "lumina/analyzer/pcap_processor/pcap_process.py", "snippet": "def get_packet_list(pcap_file):\n \"\"\" Read a pcap file and return a list of packets\n\n Args:\n pcap_file (str): The pcap file to read\n\n Returns:\n list: The list of packets if successful, empty list otherwise\n\n Raises:\n IOError: If the pcap file cannot be opened for reading\n Exception: If the pcap file cannot be read\n \"\"\"\n packet_list = []\n try:\n with open(pcap_file, 'rb') as file_read:\n pcap = dpkt.pcap.Reader(file_read)\n for packet in pcap:\n packet_list.append(roce_packet.RRoCEPacket(packet))\n except IOError:\n logging.error(\"Unable to open pcap file %s. 
Please check your filename.\" % pcap_file)\n raise IOError\n\n except:\n logging.error(\"Failed to read pcap file %s.\" % pcap_file)\n raise Exception\n\n logging.info(\"Successfully read %d packets from %s.\" % (len(packet_list), pcap_file))\n return packet_list" }, { "identifier": "LatencyMeasure", "path": "lumina/analyzer/measurer/latency_measure.py", "snippet": "class LatencyMeasure:\n \"\"\" Class to measure the latency between packets for some events,\n e.g., NACK latency, Retransmission latency, CNP latency\n\n Attributes:\n packet_list (list of RRoCEPacket objects): list of packets\n qp_info_list (list of dict): list of QP info with the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n is_read (bool): if the QPs use RDMA read verb\n \"\"\"\n def __init__(self, packet_list, qp_info_list, is_read=False):\n \"\"\" Constructor\n\n Args:\n packet_list (list of RRoCEPacket objects): list of packets\n qp_info_list (list of dict): list of QP info with the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n is_read (bool): if the QPs use RDMA read verb (default: False)\n\n Returns:\n N/A\n \"\"\"\n self.packet_list = packet_list\n self.qp_info_list = qp_info_list\n self.is_read = is_read\n\n def get_peer_qp_info(self, dest_qpn, dest_ip):\n \"\"\" Get the info of the peer QP (qpn, ip) of a given qp (qpn, ip)\n\n Args:\n dest_qpn (int): destination QP number\n dest_ip (str): destination IP\n\n Returns:\n int: peer QP number (None if not found)\n str: peer IP (None if not found)\n \"\"\"\n for qp_info in self.qp_info_list:\n if qp_info['qpn_snd'] == dest_qpn and qp_info['ip_snd'] == dest_ip:\n return qp_info['qpn_rcv'], qp_info['ip_rcv']\n elif qp_info['qpn_rcv'] == dest_qpn and qp_info['ip_rcv'] == dest_ip:\n return qp_info['qpn_snd'], qp_info['ip_snd']\n\n return None, None\n\n def get_bit_error_pkts(self, relative_dest_qpn=None):\n \"\"\" Get the packets marked with bit error flag\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with bit error flag\n \"\"\"\n error_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_bit_error() == False:\n continue\n\n if relative_dest_qpn == None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n error_pkt_list.append(packet)\n\n return error_pkt_list\n\n def get_dropped_pkts(self, relative_dest_qpn=None):\n \"\"\" Get the packets marked with drop flag\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with drop flag\n \"\"\"\n dropped_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_dropped() == False:\n continue\n\n if relative_dest_qpn 
== None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n dropped_pkt_list.append(packet)\n\n return dropped_pkt_list\n\n def get_ecn_pkts(self):\n \"\"\" Get the packets marked with ECN\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with ECN\n \"\"\"\n ecn_pkt_list = []\n\n for packet in self.packet_list:\n if packet.is_ecn():\n ecn_pkt_list.append(packet)\n\n return ecn_pkt_list\n\n def get_cnp_pkts(self):\n \"\"\" Get the congestion notification packets\n\n Returns:\n list of RRoCEPacket objects: the list of congestion notification packets\n \"\"\"\n cnp_pkt_list = []\n\n for packet in self.packet_list:\n if packet.is_cnp():\n cnp_pkt_list.append(packet)\n\n return cnp_pkt_list\n\n def get_undelivered_pkts(self, relative_dest_qpn = None):\n \"\"\" Get the undelivered packets (dropped or marked with bit error)\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of undelivered packets\n \"\"\"\n undelivered_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_delivered() == True:\n continue\n\n if relative_dest_qpn == None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n undelivered_pkt_list.append(packet)\n\n return undelivered_pkt_list\n\n def get_nack(self, undelivered_pkt):\n \"\"\" Given an undelivered packet, return the NACK packet that triggers its retransmission.\n If there's no NACK packet found for the undelivered packet, return None.\n Note that for RDMA READ, NACK is essentially a READ request packet that triggers retransmission\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the NACK packet that triggers the retransmission of the undelivered packet\n (None if not found)\n \"\"\"\n undelivered_pkt_dest_qpn = undelivered_pkt.get_roce_dest_qp()\n undelivered_pkt_dst_ip = undelivered_pkt.get_dst_ip()\n undelivered_pkt_psn = undelivered_pkt.get_roce_pkt_seq()\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n matched_dest_qpn, matched_dst_ip = self.get_peer_qp_info(undelivered_pkt_dest_qpn, undelivered_pkt_dst_ip)\n\n if matched_dest_qpn == None or matched_dst_ip == None:\n logging.error(\"QP info of the undelivered packet not found in qp_info_list dumped by switch\")\n return None\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return None\n\n if ((self.is_read and packet.is_roce_read_req()) or packet.is_roce_nack()) and \\\n packet.get_dst_ip() == matched_dst_ip and \\\n packet.get_roce_dest_qp() == matched_dest_qpn and \\\n packet.get_roce_pkt_seq() == undelivered_pkt_psn and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n ## We return the first packet appears after the undelivered packet and matches the undelivered packet\n return packet\n\n return None\n\n def get_qp_first_nack_before_retrans(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the first NACK packet on its QP between it and its retransmission.\n If there's no NACK packet found before the retransmission, return None.\n Note that for RDMA READ, NACK is essentially a READ request packet\n\n Args:\n undelivered_pkt (RRoCEPacket 
object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the first NACK packet on the QP between the undelivered packet and its retransmission\n (None if not found)\n \"\"\"\n undelivered_pkt_dest_qpn = undelivered_pkt.get_roce_dest_qp()\n undelivered_pkt_dst_ip = undelivered_pkt.get_dst_ip()\n undelivered_pkt_psn = undelivered_pkt.get_roce_pkt_seq()\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n matched_dest_qpn, matched_dst_ip = self.get_peer_qp_info(undelivered_pkt_dest_qpn, undelivered_pkt_dst_ip)\n\n if matched_dest_qpn == None or matched_dst_ip == None:\n logging.error(\"QP info of the undelivered packet not found in qp_info_list dumped by switch\")\n return None\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return None\n\n if ((self.is_read and packet.is_roce_read_req()) or packet.is_roce_nack()) and \\\n packet.get_dst_ip() == matched_dst_ip and \\\n packet.get_roce_dest_qp() == matched_dest_qpn and \\\n packet.get_roce_pkt_seq() <= undelivered_pkt_psn and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return packet\n\n return None\n\n def get_qp_next_delivered_pkt(self, current_pkt):\n \"\"\" For a packet, return the next delivered packet on the same QP.\n\n Args:\n current_pkt (RRoCEPacket object): the current packet\n\n Returns:\n RRoCEPacket object: the next delivered packet on the same QP (None if not found)\n \"\"\"\n switch_seqnum = current_pkt.get_switch_seqnum()\n\n for packet in self.packet_list:\n if self.is_same_qp_roce_data_pkt(packet, current_pkt) and \\\n packet.get_switch_seqnum() > switch_seqnum and \\\n packet.is_delivered():\n return packet\n\n return None\n\n def get_retransmit_pkt(self, undelivered_pkt):\n \"\"\" Given an undelivered packet, return its retransmission packet.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the retransmission packet of the undelivered packet (None if not found)\n \"\"\"\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n ## We return the first packet appears after the undelivered packet and matches the undelivered packet\n return packet\n\n return None\n\n def get_latency_between_pkts(self, packet_alpha, packet_beta):\n \"\"\" Return the time of packet_beta - time of packet_alpha in seconds\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n float: the time difference between two packets in seconds\n \"\"\"\n return packet_beta.get_switch_timestamp() - packet_alpha.get_switch_timestamp()\n\n def is_same_roce_data_pkt(self, packet_alpha, packet_beta):\n \"\"\" Return if two packets are the same RoCE data packet (same src ip, dst ip, dest qp, and psn)\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n bool: True if two packets are the same RoCE data packet, False otherwise\n \"\"\"\n return packet_alpha.get_src_ip() == packet_beta.get_src_ip() and \\\n packet_alpha.get_dst_ip() == packet_beta.get_dst_ip() and \\\n packet_alpha.get_roce_dest_qp() == packet_beta.get_roce_dest_qp() and \\\n packet_alpha.get_roce_pkt_seq() == packet_beta.get_roce_pkt_seq()\n\n def 
is_same_qp_roce_data_pkt(self, packet_alpha, packet_beta):\n \"\"\" Return if two packets are RoCE data packets on the same QP (same src ip, dst ip, and dest qp)\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n bool: True if two packets are RoCE data packets on the same QP, False otherwise\n \"\"\"\n return packet_alpha.get_src_ip() == packet_beta.get_src_ip() and \\\n packet_alpha.get_dst_ip() == packet_beta.get_dst_ip() and \\\n packet_alpha.get_roce_dest_qp() == packet_beta.get_roce_dest_qp()\n\n def get_qp_next_delivered_pkt_latency(self, pkt):\n \"\"\" Get the latency between 'pkt' and next 'delivered' packet on the same QP\n\n Args:\n pkt (RRoCEPacket object): the packet\n\n Returns:\n float: the latency between 'pkt' and next 'delivered' packet on the same QP\n (None if not found)\n \"\"\"\n\n next_pkt = self.get_qp_next_delivered_pkt(pkt)\n if next_pkt is None:\n return None\n\n return self.get_latency_between_pkts(pkt, next_pkt)\n\n def get_nack_gen_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the NACK generation latency, i.e., the duration from the detection of\n the undelivered packet to the generation of the NACK packet that triggers its retransmission.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the NACK generation latency for the undelivered packet (None if not found)\n \"\"\"\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n return None\n\n # NACK should be triggered by the next delivered packet on the same QP\n next_delivered_pkt = self.get_qp_next_delivered_pkt(undelivered_pkt)\n if self.is_same_roce_data_pkt(next_delivered_pkt, undelivered_pkt):\n # We should never reach here\n return None\n\n nack_gen_latency = self.get_latency_between_pkts(next_delivered_pkt, nack_pkt)\n return nack_gen_latency\n\n def get_nack_resp_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the NACK response latency, i.e., the duration from the generation of\n the NACK packet to the retransmission of this undelivered packet.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the NACK response latency for the undelivered packet (None if not found)\n \"\"\"\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n return None\n\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n return None\n\n nack_resp_latency = self.get_latency_between_pkts(nack_pkt, retransmit_pkt)\n return nack_resp_latency\n\n def get_retransmit_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the retransmission latency, i.e., the duration from the packet\n to its retransmission.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the retransmission latency for the undelivered packet (None if not found)\n \"\"\"\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n return None\n\n retransmit_latency = self.get_latency_between_pkts(undelivered_pkt, retransmit_pkt)\n return retransmit_latency\n\n def get_nack_gen_latency_list(self, relative_dest_qpn=None):\n \"\"\" Return a list of NACK generation latency for all undelivered packets with relative_dest_qpn\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of float: a list of NACK generation latency for all 
undelivered packets with relative_dest_qpn\n \"\"\"\n undelivered_pkts = self.get_undelivered_pkts(relative_dest_qpn)\n nack_latency_list = []\n\n for undelivered_pkt in undelivered_pkts:\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n nack_latency_list.append(None)\n else:\n nack_latency = self.get_latency_between_pkts(undelivered_pkt, nack_pkt)\n nack_latency_list.append(nack_latency)\n\n return nack_latency_list\n\n def get_retransmit_latency_list(self, relative_dest_qpn):\n \"\"\" Return a list of retransmission latency for all undelivered packets with relative_dest_qpn\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of float: a list of retransmission latency for all undelivered packets with relative_dest_qpn\n \"\"\"\n undelivered_pkts = self.get_undelivered_pkts(relative_dest_qpn)\n retransmit_latency_list = []\n\n for undelivered_pkt in undelivered_pkts:\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n retransmit_latency_list.append(None)\n else:\n retransmit_latency = self.get_latency_between_pkts(undelivered_pkt, retransmit_pkt)\n retransmit_latency_list.append(retransmit_latency)\n\n return retransmit_latency_list" }, { "identifier": "config_stream_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_stream_handler(logger):\n \"\"\" Configure stream handler\n\n Args:\n logger (logging.Logger): Logger object\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n logger.addHandler(console)" }, { "identifier": "config_file_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_file_handler(logger, log_file, no_format=False):\n \"\"\" Configure file handler\n\n Args:\n logger (logging.Logger): Logger object\n log_file (str): Log file path\n no_format (bool): If True, do not format log messages (default: False)\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n file_handler = logging.FileHandler(log_file, mode=\"w\")\n if no_format == False:\n file_handler.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n file_handler.setLevel(logging.INFO)\n logger.addHandler(file_handler)" }, { "identifier": "TRIGGER_OOS", "path": "lumina/analyzer/packet_parser/roce_packet.py", "snippet": "TRIGGER_OOS = 1" }, { "identifier": "TRIGGER_TIMEOUT", "path": "lumina/analyzer/packet_parser/roce_packet.py", "snippet": "TRIGGER_TIMEOUT = 2" } ]
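The LatencyMeasure snippet above derives every metric from switch-assigned timestamps: the latency between two packets is the difference of their switch timestamps, and the retransmission latency of an undelivered packet is that difference taken against its retransmission. The following minimal sketch restates that idea outside the repository; the SimplePacket class and the sample timestamps are hypothetical stand-ins for RRoCEPacket objects, not code from the project.

from dataclasses import dataclass

@dataclass
class SimplePacket:
    # Hypothetical stand-in for RRoCEPacket: only the timestamp field is modeled.
    switch_timestamp: float  # seconds, as recorded by the switch

def latency_between(pkt_alpha: SimplePacket, pkt_beta: SimplePacket) -> float:
    # Mirrors get_latency_between_pkts(): time of the second packet minus time of the first.
    return pkt_beta.switch_timestamp - pkt_alpha.switch_timestamp

# Example: an undelivered data packet retransmitted 12 microseconds later.
undelivered = SimplePacket(switch_timestamp=1.000000)
retransmission = SimplePacket(switch_timestamp=1.000012)
print("Retransmission latency: %fus" % (latency_between(undelivered, retransmission) * 1e6))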
import argparse, os, math, glob, logging, time import lumina.analyzer.checker.integrity_check as integrity_check import lumina.analyzer.checker.host_check as host_check import lumina.analyzer.checker.gbn_check as gbn_check import lumina.analyzer.checker.read_gbn_check as read_gbn_check import lumina.orchestrator.host as host import lumina.orchestrator.switch as switch from lumina.analyzer.main import get_qp_info_list from lumina.orchestrator.main import Orchestrator from lumina.analyzer.counter.switch_counter import SwitchCounter from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter from lumina.analyzer.pcap_processor.pcap_process import get_packet_list from lumina.analyzer.measurer.latency_measure import LatencyMeasure from lumina.utils.config_loggers import config_stream_handler, config_file_handler from lumina.analyzer.packet_parser.roce_packet import TRIGGER_OOS, TRIGGER_TIMEOUT
15084
elif trigger == TRIGGER_TIMEOUT: nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: nack = latency_measurement.get_qp_first_nack_before_retrans(pkt) if nack is None: logger.error("\t\t Cannot find the NACK READ request to recover this lost packet") return trigger = nack.get_trigger() if trigger == TRIGGER_OOS: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: # For other verbs, we can only find a NACK in case of out of sequence arriving packets if latency_measurement.get_nack(pkt) != None: # Out of sequence/NACK triggered retransmission next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK response latency: %fus' % (nack_resp_latency * 1e6)) elif latency_measurement.get_qp_first_nack_before_retrans(pkt) != None: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK indicates a loss (%d) before this packet (%d)") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) def verify_results(orchestrator): """ Verify the experiment results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ result_dir = orchestrator.result_path num_repeats = orchestrator.num_repeats mtu = orchestrator.traffic_conf['mtu'] msg_size = orchestrator.traffic_conf['message-size'] num_msgs_per_qp = orchestrator.traffic_conf['num-msgs-per-qp'] aggregate_pcap_filename = orchestrator.aggregate_pcap_filename port_map = {'requester': orchestrator.requester.conf['nic']['switch-port'], 'responder': orchestrator.responder.conf['nic']['switch-port'], 'requester-mirror': orchestrator.requester_mirror.conf['nic']['switch-port'], 'responder-mirror': orchestrator.responder_mirror.conf['nic']['switch-port']} requester_ip_list = orchestrator.get_requester_ip_list() responder_ip_list = orchestrator.get_responder_ip_list() for iter in range(num_repeats): iter = str(iter) result_logger = logging.getLogger('Analysis iter %s' % (iter)) 
result_logger.handlers.clear() config_file_handler(logger=result_logger, log_file=os.path.join(result_dir, iter, RESULT_FILENAME), no_format=True) result_logger.info("=" * 100) result_logger.info("Iteration %s" % iter) switch_msg_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_MESSAGE_SNAPSHOT) switch_state_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_STATE_SNAPSHOT) pcap_filename = os.path.join(result_dir, iter, host.PCAP_RESULT_DIR, aggregate_pcap_filename) requester_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_START_COUNTER_FILE_NAME) requester_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_FINISH_COUNTER_FILE_NAME) responder_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_START_COUNTER_FILE_NAME) responder_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_FINISH_COUNTER_FILE_NAME) switch_counter = SwitchCounter(switch_state_snapshot, port_map) if orchestrator.requester.is_mlnx_nic():
## All logs will be logged into file LOG_FILENAME LOG_FILENAME = "test_gbn.log" ## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME RESULT_FILENAME = "result.log" ## Max # of retries for each experiment iteration MAX_NB_EXP_RETRIES = 3 def setup_root_logger(orchestrator): """ Setup the root logger for the test Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ root_logger = logging.getLogger() root_logger.handlers.clear() config_stream_handler(root_logger) config_file_handler(logger=root_logger, log_file=os.path.join(orchestrator.result_path, LOG_FILENAME), no_format=False) def run_traffic(orchestrator): """ Run the traffic and collect the results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: bool: True if the experiment is successful, False otherwise """ orchestrator.rm_old_files() if orchestrator.sync_and_compile() == False: logging.error("Failed to sync and compile the code") sys.exit(-1) logging.info("Sync and compile completed") if orchestrator.generate_switch_config_file() == False: logging.error("Failed to generate switch configuration file") sys.exit(-1) num_repeats = orchestrator.get_num_repeats() for i in range(num_repeats): logging.info("=" * 100) nb_retry = 0 iter_result = False while nb_retry < MAX_NB_EXP_RETRIES: if orchestrator.run_experiment() == False: logging.error("Iteration %d: Failed to complete experiment" % i) logging.error("Iteration %d: Rerun experiment (retry: %d)" % i, nb_retry) nb_retry += 1 orchestrator.clean_up() time.sleep(5) continue logging.info("Iteration %d: Completed experiment" % i) try: orchestrator.clean_up() orchestrator.fetch_results(i) logging.info("Iteration %d: Fetch experiment results" % i) orchestrator.merge_traces(i) logging.info("Iteration %d: Merge the pcap files" % i) except: logging.error("Iteration %d: Result collection failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue if orchestrator.check_integrity(i) == False: logging.error("Iteration %d: Integrity check failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue iter_result = True break if iter_result is False: logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry)) return False return True def analyze_retrans_latency(pkt, latency_measurement, is_read, logger): """ Analyze the retransmission latency breakdown for an undelivered packet Args: pkt (Packet object): The undelivered packet latency_measurement (LatencyMeasure object): A LatencyMeasure object that can compute latency breakdown is_read (bool): If we use RDMA READ in this experiment logger (logging.Logger): A logger object Returns: N/A """ # All the undelivered packets should be retransmitted in our test cases if latency_measurement.get_retransmit_pkt(pkt) == None: logger.error("\t\t No retransmit packet found for this packet") logger.error("\t\t It is possible that this undelivered packet is a redundant transmission") return retrans_latency = latency_measurement.get_retransmit_latency(pkt) if is_read == True: # For RDMA READ, we should always find a NACK READ request that triggers retransmission nack = latency_measurement.get_nack(pkt) if nack is not None: trigger = nack.get_trigger() if trigger == TRIGGER_OOS: next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) 
nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK READ request generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: nack = latency_measurement.get_qp_first_nack_before_retrans(pkt) if nack is None: logger.error("\t\t Cannot find the NACK READ request to recover this lost packet") return trigger = nack.get_trigger() if trigger == TRIGGER_OOS: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: # For other verbs, we can only find a NACK in case of out of sequence arriving packets if latency_measurement.get_nack(pkt) != None: # Out of sequence/NACK triggered retransmission next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK response latency: %fus' % (nack_resp_latency * 1e6)) elif latency_measurement.get_qp_first_nack_before_retrans(pkt) != None: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK indicates a loss (%d) before this packet (%d)") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) def verify_results(orchestrator): """ Verify the experiment results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ result_dir = orchestrator.result_path num_repeats = orchestrator.num_repeats mtu = orchestrator.traffic_conf['mtu'] msg_size = orchestrator.traffic_conf['message-size'] num_msgs_per_qp = orchestrator.traffic_conf['num-msgs-per-qp'] aggregate_pcap_filename = 
orchestrator.aggregate_pcap_filename port_map = {'requester': orchestrator.requester.conf['nic']['switch-port'], 'responder': orchestrator.responder.conf['nic']['switch-port'], 'requester-mirror': orchestrator.requester_mirror.conf['nic']['switch-port'], 'responder-mirror': orchestrator.responder_mirror.conf['nic']['switch-port']} requester_ip_list = orchestrator.get_requester_ip_list() responder_ip_list = orchestrator.get_responder_ip_list() for iter in range(num_repeats): iter = str(iter) result_logger = logging.getLogger('Analysis iter %s' % (iter)) result_logger.handlers.clear() config_file_handler(logger=result_logger, log_file=os.path.join(result_dir, iter, RESULT_FILENAME), no_format=True) result_logger.info("=" * 100) result_logger.info("Iteration %s" % iter) switch_msg_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_MESSAGE_SNAPSHOT) switch_state_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_STATE_SNAPSHOT) pcap_filename = os.path.join(result_dir, iter, host.PCAP_RESULT_DIR, aggregate_pcap_filename) requester_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_START_COUNTER_FILE_NAME) requester_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_FINISH_COUNTER_FILE_NAME) responder_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_START_COUNTER_FILE_NAME) responder_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_FINISH_COUNTER_FILE_NAME) switch_counter = SwitchCounter(switch_state_snapshot, port_map) if orchestrator.requester.is_mlnx_nic():
requester_counter = MLNXHostCounter(requester_counter_start, requester_counter_finish)
3
2023-12-09 08:21:14+00:00
24k
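This closes one record of the dump: retrieved context snippets, an import block, the truncated file body (cropped_code), the gold next line, the index of the snippet that supplies it, and bookkeeping fields such as the token count, timestamp, and level bucket. A record shaped like this can be scored for next-line prediction with a few lines of Python; the miniature record and the stand-in prediction below are assumptions made purely for illustration and do not come from this file.

def build_prompt(record: dict) -> str:
    # Concatenate the retrieved snippets, the import block, and the truncated file body.
    context_snippets = "\n".join(item["snippet"] for item in record["context"])
    return context_snippets + "\n" + record["import_statement"] + "\n" + record["cropped_code"]

def exact_match(prediction: str, record: dict) -> bool:
    # Count a prediction as correct only if it reproduces the gold next line (whitespace-stripped).
    return prediction.strip() == record["next_line"].strip()

# Hypothetical miniature record, shaped like the entries in this dump.
record = {
    "context": [{"identifier": "helper", "path": "pkg/helper.py", "snippet": "def helper():\n    return 1"}],
    "import_statement": "from pkg.helper import helper",
    "cropped_code": "def caller():",
    "next_line": "    return helper()",
}
prompt = build_prompt(record)              # would be fed to a completion model
prediction = "    return helper()"         # stand-in for a model's next-line completion
print(exact_match(prediction, record))     # True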
ebb-earl-co/tidal-wave
tidal_wave/main.py
[ { "identifier": "login", "path": "tidal_wave/login.py", "snippet": "def login(\n audio_format: AudioFormat,\n) -> Tuple[Optional[requests.Session], Optional[AudioFormat]]:\n \"\"\"Given a selected audio_format, either log in \"automatically\"\n via the Fire TV OAuth 2.0 flow, or ask for an Android-/Windows-/MacOS-\n gleaned API token; the latter to be able to access HiRes fLaC audio.\n Returns a tuple of a requests.Session object, if no error, and the\n AudioFormat instance passed in; or (None, \"\") in the event of error.\n \"\"\"\n android_formats: Set[AudioFormat] = {\n AudioFormat.sony_360_reality_audio,\n AudioFormat.hi_res,\n }\n fire_tv_formats: Set[AudioFormat] = {\n AudioFormat.dolby_atmos,\n AudioFormat.mqa,\n AudioFormat.lossless,\n AudioFormat.high,\n AudioFormat.low,\n }\n if audio_format in fire_tv_formats:\n return (login_fire_tv(), audio_format)\n elif audio_format in android_formats:\n options: set = {\"android\", \"a\", \"windows\", \"w\"}\n _input: str = \"\"\n while _input not in options:\n _input = typer.prompt(\n \"For which of Android [a] or Windows [w] would you like to provide an API token?\"\n ).lower()\n else:\n if _input in {\"android\", \"a\"}:\n return (login_android(), audio_format)\n elif _input in {\"windows\", \"w\"}:\n return (login_windows(), audio_format)\n else:\n logger.critical(\n \"Please provide one of the following: \"\n f\"{', '.join(e.value for e in AudioFormat)}\"\n )\n return (None, \"\")" }, { "identifier": "AudioFormat", "path": "tidal_wave/login.py", "snippet": "class AudioFormat(str, Enum):\n sony_360_reality_audio = \"360\"\n dolby_atmos = \"Atmos\"\n hi_res = \"HiRes\"\n mqa = \"MQA\"\n lossless = \"Lossless\"\n high = \"High\"\n low = \"Low\"" }, { "identifier": "LogLevel", "path": "tidal_wave/login.py", "snippet": "class LogLevel(str, Enum):\n debug = \"DEBUG\" # 10\n info = \"INFO\" # 20\n warning = \"WARNING\" # 30\n error = \"ERROR\" # 40\n critical = \"CRITICAL\" # 50" }, { "identifier": "Album", "path": "tidal_wave/album.py", "snippet": "class Album:\n album_id: int\n\n def __post_init__(self):\n self.album_dir: Optional[Path] = None\n self.album_cover_saved: bool = False\n\n def get_items(self, session: Session):\n \"\"\"This method populates self.tracks by requesting from\n TIDAL albums/items endpoint.\"\"\"\n album_items: AlbumsItemsResponseJSON = request_album_items(\n session=session, identifier=self.album_id\n )\n _items = album_items.items if album_items is not None else ()\n self.tracks = tuple(_item.item for _item in _items)\n\n def get_metadata(self, session: Session):\n \"\"\"This method populates self.metadata by requesting from\n TIDAL /albums endpoint\"\"\"\n self.metadata: AlbumsEndpointResponseJSON = request_albums(\n session=session, identifier=self.album_id\n )\n\n def get_review(self, session: Session):\n \"\"\"This method requests the review corresponding to self.album_id\n in TIDAL. 
If it exists, it is written to disk as AlbumReview.json\n in self.album_dir\"\"\"\n self.album_review: Optional[AlbumsReviewResponseJSON] = request_album_review(\n session=session, identifier=self.album_id\n )\n if self.album_review is not None:\n (self.album_dir / \"AlbumReview.json\").write_text(\n self.album_review.to_json()\n )\n\n def set_dir(self, out_dir: Path):\n \"\"\"This method populates self.album_dir as a sub-subdirectory of\n out_dir: its parent directory is the name of the (main) artist of\n the album\"\"\"\n artist_substring: str = self.metadata.artist.name.replace(\"..\", \"\")\n album_substring: str = (\n f\"{self.metadata.name.replace('..', '')} \"\n f\"[{self.metadata.id}] [{self.metadata.release_date.year}]\"\n )\n self.album_dir = out_dir / artist_substring / album_substring\n self.album_dir.mkdir(parents=True, exist_ok=True)\n\n if self.metadata.number_of_volumes > 1:\n for v in range(1, self.metadata.number_of_volumes + 1):\n volume_substring: str = f\"Volume {v}\"\n (out_dir / artist_substring / album_substring / volume_substring).mkdir(\n parents=True, exist_ok=True\n )\n\n def save_cover_image(self, session: Session, out_dir: Path):\n \"\"\"This method writes cover.jpg in self.album_dir via the\n utils.download_cover_image() function. If successful,\n then self.album_cover_saved takes the value True\"\"\"\n if self.album_dir is None:\n self.set_dir(out_dir=out_dir)\n self.cover_path: Path = self.album_dir / \"cover.jpg\"\n if not self.cover_path.exists():\n download_cover_image(\n session=session,\n cover_uuid=self.metadata.cover,\n output_dir=self.album_dir,\n )\n else:\n self.album_cover_saved = True\n\n def get_tracks(\n self, session: Session, audio_format: AudioFormat, out_dir: Path\n ) -> List[Optional[str]]:\n \"\"\"This method uses self.tracks to call track.Track.get() for each\n track in self.tracks. It uses the result of each of these calls to\n populate self.track_files\"\"\"\n track_files: List[str] = [None] * self.metadata.number_of_tracks\n for i, t in enumerate(self.tracks): # type(t) is TracksEndpointResponseJSON\n track: Track = Track(track_id=t.id)\n\n track_files_value: Optional[str] = track.get(\n session=session,\n audio_format=audio_format,\n out_dir=out_dir,\n metadata=t,\n album=self.metadata,\n )\n track_files[i] = {track.metadata.track_number: track_files_value}\n else:\n self.track_files = track_files\n\n def dumps(self):\n \"\"\"This method returns a JSON-like string of self.track_files\"\"\"\n return json.dumps(self.track_files)\n\n def dump(self, fp=sys.stdout):\n \"\"\"This method writes to (by default) STDOUT a\n JSON-like string of self.track_files\"\"\"\n json.dump(self.track_files, fp)\n\n def get(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n metadata: Optional[AlbumsEndpointResponseJSON] = None,\n ):\n \"\"\"This method is the driver method of the class. It calls the\n other methods in order:\n 1. get_metadata()\n 2. get_items()\n 3. save_cover_image()\n 4. get_review()\n 5. 
get_tracks()\n \"\"\"\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n \n if self.metadata is None:\n self.track_files = {}\n return\n\n self.get_items(session)\n self.save_cover_image(session, out_dir)\n self.get_review(session)\n self.get_tracks(session, audio_format, out_dir)" }, { "identifier": "Artist", "path": "tidal_wave/artist.py", "snippet": "class Artist:\n artist_id: int\n\n def set_metadata(self, session: Session):\n \"\"\"This function requests from TIDAL API endpoint /artists and\n stores the results in self.metadata\"\"\"\n self.metadata: Optional[ArtistsEndpointResponseJSON] = request_artists(\n session, self.artist_id\n )\n\n def save_artist_image(self, session: Session):\n \"\"\"This method writes the bytes of self.metadata.picture to\n the file cover.jpg in self.artist_dir\"\"\"\n artist_image: Path = self.artist_dir / \"cover.jpg\"\n if not artist_image.exists():\n download_cover_image(\n session, self.metadata.picture, self.artist_dir, dimension=750\n )\n\n def set_albums(self, session: Session):\n \"\"\"This method requests from TIDAL API endpoint /artists/albums and\n stores the results in self.albums\"\"\"\n self.albums: Optional[ArtistsAlbumsResponseJSON] = request_artists_albums(\n session, self.artist_id\n )\n\n def set_audio_works(self, session: Session):\n \"\"\"This method requests from TIDAL API endpoint\n /artists/albums?filter=EPSANDSINGLES and stores the results in self.albums\"\"\"\n self.albums: Optional[ArtistsAlbumsResponseJSON] = request_artists_audio_works(\n session, self.artist_id\n )\n\n def set_videos(self, session: Session):\n \"\"\"This method requests from TIDAL API endpoint /artists/videos and\n stores the results in self.albums\"\"\"\n self.videos: Optional[ArtistsVideosResponseJSON] = request_artists_videos(\n session, self.artist_id\n )\n\n def set_dir(self, out_dir: Path):\n \"\"\"This method sets self.artist_dir and creates the directory on the file system\n if it does not exist\"\"\"\n self.name: str = self.metadata.name.replace(\"..\", \"\")\n self.artist_dir = out_dir / self.name\n self.artist_dir.mkdir(parents=True, exist_ok=True)\n\n def get_albums(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n include_eps_singles: bool = False,\n ) -> List[Optional[str]]:\n \"\"\"This method first fetches the total albums on TIDAL's service\n corresponding to the artist with ID self.artist_id. 
Then, each of\n the albums (and, optionally, EPs and singles) is requested and\n written to subdirectories of out_dir\"\"\"\n if include_eps_singles:\n self.set_audio_works(session)\n logger.info(\n f\"Starting attempt to get {self.albums.total_number_of_items} \"\n \"albums, EPs, and singles for artist with ID \"\n f\"{self.metadata.id}, '{self.name}'\"\n )\n else:\n self.set_albums(session)\n logger.info(\n f\"Starting attempt to get {self.albums.total_number_of_items} albums \"\n f\"for artist with ID {self.metadata.id}, '{self.name}'\"\n )\n\n for i, a in enumerate(self.albums.items):\n album: Album = Album(album_id=a.id)\n album.get(\n session=session,\n audio_format=audio_format,\n out_dir=out_dir,\n metadata=a,\n )\n\n def get_videos(\n self,\n session: Session,\n out_dir: Path,\n ) -> List[Optional[str]]:\n \"\"\"This method sets self.videos by calling self.set_videos()\n then, for each video, instantiates a Video object and executes\n video.get()\"\"\"\n self.set_videos(session)\n logger.info(\n f\"Starting attempt to get {self.videos.total_number_of_items} videos \"\n f\"for artist with ID {self.metadata.id}, '{self.name}'\"\n )\n for i, v in enumerate(self.videos.items):\n video: Video = Video(video_id=v.id)\n video.get(\n session=session,\n out_dir=out_dir,\n metadata=v,\n )\n\n def get(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n include_eps_singles: bool,\n ):\n \"\"\"This is the driver method of the class. It executes the other\n methods in order:\n 1. set_metadata\n 2. set_dir\n 3. save_artist_image\n 4. get_videos\n 5. get_albums\n \"\"\"\n self.set_metadata(session)\n \n if self.metadata is None:\n return\n \n self.set_dir(out_dir)\n self.save_artist_image(session)\n self.get_videos(session, out_dir)\n if include_eps_singles:\n self.get_albums(session, audio_format, out_dir, include_eps_singles=True)\n self.get_albums(session, audio_format, out_dir, include_eps_singles=False)" }, { "identifier": "Mix", "path": "tidal_wave/mix.py", "snippet": "class Mix:\n mix_id: str\n\n def __post_init__(self):\n self.mix_dir: Optional[Path] = None\n self.mix_cover_saved: bool = False\n\n def get_metadata(self, session: Session):\n \"\"\"Request from TIDAL API /playlists endpoint\"\"\"\n self.metadata: Optional[PlaylistsEndpointResponseJSON] = request_mixes(\n session=session, mix_id=self.mix_id\n )\n \n if self.metadata is None:\n return\n \n self.name = (\n self.metadata.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n .replace(\"..\", \"\")\n )\n\n def set_items(self, session: Session):\n \"\"\"Uses data from TIDAL API /mixes/items endpoint to\n populate self.items\"\"\"\n mix_items: Optional[MixesItemsResponseJSON] = get_mix(\n session=session, mix_id=self.mix_id\n )\n if mix_items is None:\n self.items = tuple()\n else:\n self.items: Tuple[Optional[MixItem]] = tuple(mix_items.items)\n\n def set_dir(self, out_dir: Path):\n \"\"\"Populates self.mix_dir based on self.name, self.mix_id\"\"\"\n mix_substring: str = f\"{self.name} [{self.mix_id}]\"\n self.mix_dir: Path = out_dir / \"Mixes\" / mix_substring\n self.mix_dir.mkdir(parents=True, exist_ok=True)\n\n def save_cover_image(self, session: Session, out_dir: Path):\n \"\"\"Requests self.metadata.image and attempts to write it to disk\"\"\"\n if self.mix_dir is None:\n self.set_dir(out_dir=out_dir)\n self.cover_path: Path = self.mix_dir / \"cover.jpg\"\n if not self.cover_path.exists():\n with session.get(\n url=self.metadata.image, params={k: None 
for k in session.params}\n ) as r:\n (self.mix_dir / \"cover.jpg\").write_bytes(r.content)\n\n self.mix_cover_saved = True\n else:\n self.mix_cover_saved = True\n\n def get_items(self, session: Session, audio_format: AudioFormat):\n \"\"\"Using either Track.get() or Video.get(), attempt to request\n the data for each track or video in self.items\"\"\"\n if len(self.items) == 0:\n return\n tracks_videos: list = [None] * len(self.items)\n for i, item in enumerate(self.items):\n if item is None:\n tracks_videos[i] = None\n continue\n elif isinstance(item, TracksEndpointResponseJSON):\n track: Track = Track(track_id=item.id)\n track.get(\n session=session,\n audio_format=audio_format,\n out_dir=self.mix_dir,\n metadata=item,\n )\n tracks_videos[i] = track\n elif isinstance(item, VideosEndpointResponseJSON):\n video: Video = Video(video_id=item.id)\n video.get(\n session=session,\n out_dir=self.mix_dir,\n metadata=item,\n )\n tracks_videos[i] = video\n else:\n tracks_videos[i] = None\n continue\n else:\n self.tracks_videos: Tuple[\n Tuple[int, Optional[Union[Track, Video]]]\n ] = tuple(tracks_videos)\n return tracks_videos\n\n def flatten_mix_dir(self):\n \"\"\"When self.get_items() is called, the tracks and/or videos in\n self.items are downloaded using their self-contained .get() logic;\n this means that they will be downloaded to albums. This function\n \"flattens\" self.mix_dir, meaning that it moves all downloaded\n audio and video files to self.mix_dir, and removes the various\n subdirectories created\"\"\"\n files: List[Dict[int, Optional[str]]] = [None] * len(self.tracks_videos)\n if len(self.tracks_videos) == 0:\n return\n subdirs: Set[Path] = set()\n\n for i, tv in enumerate(self.tracks_videos, 1):\n if getattr(tv, \"outfile\") is None:\n try:\n getattr(tv, \"album_dir\")\n except AttributeError:\n pass\n else:\n subdirs.add(tv.album_dir)\n subdirs.add(tv.album_dir.parent)\n files[i - 1] = {i: None}\n continue\n\n _path: Optional[Path] = Path(tv.outfile) if tv is not None else None\n # if the item never got turned into a track or video\n if _path is None:\n files[i - 1] = {i: None}\n continue\n\n # if the track or video didn't download\n if _path.exists():\n if _path.stat().st_size == 0:\n files[i - 1] = {i: None}\n continue\n else:\n files[i - 1] = {i: None}\n continue\n\n # otherwise, move files and clean up\n if isinstance(tv, Track):\n new_path: Path = self.mix_dir / f\"{i:03d} - {tv.trackname}\"\n new_path.write_bytes(_path.read_bytes())\n _path.unlink()\n files[i - 1] = {i: str(new_path.absolute())}\n elif isinstance(tv, Video):\n new_path: Path = self.mix_dir / f\"{i:03d} - {_path.name}\"\n new_path.write_bytes(_path.read_bytes())\n _path.unlink()\n files[i - 1] = {i: str(new_path.absolute())}\n else:\n self.files: List[Dict[int, Optional[str]]] = files\n\n # Find all subdirectories written to\n subdirs: Set[Path] = set()\n for tv in self.tracks_videos:\n if isinstance(tv, Track):\n try:\n getattr(tv, \"album_dir\")\n except AttributeError:\n pass\n else:\n subdirs.add(tv.album_dir)\n subdirs.add(tv.album_dir.parent)\n elif isinstance(tv, Video):\n subdirs.add(tv.artist_dir)\n\n # Copy all artist images, artist bio JSON files out\n # of subdirs\n artist_images: Set[Path] = set()\n for subdir in subdirs:\n for p in subdir.glob(\"*.jpg\"):\n if p.name == \"cover.jpg\":\n continue\n artist_images.add(p)\n else:\n for artist_image_path in artist_images:\n if artist_image_path.exists():\n shutil.copyfile(\n artist_image_path.absolute(),\n self.mix_dir / artist_image_path.name,\n 
)\n\n artist_bios: Set[Path] = set()\n for subdir in subdirs:\n for p in subdir.glob(\"*bio.json\"):\n artist_bios.add(p)\n else:\n for artist_bio_path in artist_bios:\n if artist_bio_path.exists():\n shutil.copyfile(\n artist_bio_path.absolute(),\n self.mix_dir / artist_bio_path.name,\n )\n\n # Remove all subdirs\n for subdir in subdirs:\n if subdir.exists():\n shutil.rmtree(subdir)\n else:\n return self.mix_dir\n\n def dumps(self):\n return json.dumps(self.files)\n\n def dump(self, fp=sys.stdout):\n json.dump(self.files, fp)\n\n def get(self, session: Session, audio_format: AudioFormat, out_dir: Path):\n \"\"\"The main method of this class, executing a number of other methods\n in a row:\n - self.get_metadata()\n - self.set_items()\n - self.set_dir()\n - self.save_cover_image()\n - self.get_items()\n - self.flatten_playlist_dir()\n \"\"\"\n self.get_metadata(session)\n \n if self.metadata is None:\n self.files = {}\n return\n \n self.set_items(session)\n self.set_dir(out_dir)\n self.save_cover_image(session, out_dir)\n try:\n self.save_description()\n except Exception:\n pass\n\n _get_items = self.get_items(session, audio_format)\n if _get_items is None:\n logger.critical(f\"Could not retrieve mix with ID '{self.mix_id}'\")\n return\n self.flatten_mix_dir()\n logger.info(f\"Mix files written to '{self.mix_dir}'\")" }, { "identifier": "Playlist", "path": "tidal_wave/playlist.py", "snippet": "class Playlist:\n playlist_id: str # UUID4\n\n def __post_init__(self):\n self.playlist_dir: Optional[Path] = None\n self.playlist_cover_saved: bool = False\n\n def get_metadata(self, session: Session):\n \"\"\"Request from TIDAL API /playlists endpoint\"\"\"\n self.metadata: Optional[PlaylistsEndpointResponseJSON] = request_playlists(\n session=session, identifier=self.playlist_id\n )\n \n if self.metadata is None:\n return\n \n self.name = (\n self.metadata.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n .replace(\"..\", \"\")\n )\n\n def set_items(self, session: Session):\n \"\"\"Uses data from TIDAL API /playlists/items endpoint to\n populate self.items\"\"\"\n playlist_items: Optional[PlaylistsItemsResponseJSON] = get_playlist(\n session=session, playlist_id=self.playlist_id\n )\n if playlist_items is None:\n self.items = tuple()\n else:\n self.items: Tuple[Optional[PlaylistItem]] = tuple(playlist_items.items)\n\n def set_dir(self, out_dir: Path):\n \"\"\"Populates self.playlist_dir based on self.name, self.playlist_id\"\"\"\n playlist_substring: str = f\"{self.name} [{self.playlist_id}]\"\n self.playlist_dir: Path = out_dir / \"Playlists\" / playlist_substring\n self.playlist_dir.mkdir(parents=True, exist_ok=True)\n\n def save_cover_image(self, session: Session, out_dir: Path):\n \"\"\"Requests self.metadata.image and attempts to write it to disk\"\"\"\n if self.playlist_dir is None:\n self.set_dir(out_dir=out_dir)\n self.cover_path: Path = self.playlist_dir / \"cover.jpg\"\n if not self.cover_path.exists():\n download_cover_image(\n session=session,\n cover_uuid=self.metadata.square_image,\n output_dir=self.playlist_dir,\n dimension=1080,\n )\n else:\n self.playlist_cover_saved = True\n\n def save_description(self):\n \"\"\"Requests self.metadata.description and attempts to write it to disk\"\"\"\n description_path: Path = self.playlist_dir / \"PlaylistDescription.txt\"\n if self.metadata.description is not None and len(self.metadata.description) > 0:\n if not description_path.exists():\n 
description_path.write_text(f\"{self.metadata.description}\\n\")\n\n def get_items(self, session: Session, audio_format: AudioFormat):\n \"\"\"Using either Track.get() or Video.get(), attempt to request\n the data for each track or video in self.items\"\"\"\n if len(self.items) == 0:\n return\n tracks_videos: list = [None] * len(self.items)\n for i, item in enumerate(self.items):\n if item is None:\n tracks_videos[i] = None\n continue\n elif isinstance(item, TracksEndpointResponseJSON):\n track: Track = Track(track_id=item.id)\n track.get(\n session=session,\n audio_format=audio_format,\n out_dir=self.playlist_dir,\n metadata=item,\n )\n tracks_videos[i] = track\n elif isinstance(item, VideosEndpointResponseJSON):\n video: Video = Video(video_id=item.id)\n video.get(\n session=session,\n out_dir=self.playlist_dir,\n metadata=item,\n )\n tracks_videos[i] = video\n else:\n tracks_videos[i] = None\n continue\n else:\n self.tracks_videos: Tuple[\n Tuple[int, Optional[Union[Track, Video]]]\n ] = tuple(tracks_videos)\n return tracks_videos\n\n def flatten_playlist_dir(self):\n \"\"\"When self.get_items() is called, the tracks and/or videos in\n self.items are downloaded using their self-contained .get() logic;\n this means that they will be downloaded to albums. This function\n \"flattens\" self.playlist_dir, meaning that it moves all downloaded\n audio and video files to self.playlist_dir, and removes the various\n subdirectories created\"\"\"\n files: List[Dict[int, Optional[str]]] = [None] * len(self.tracks_videos)\n if len(self.tracks_videos) == 0:\n return\n subdirs: Set[Path] = set()\n\n for i, tv in enumerate(self.tracks_videos, 1):\n if getattr(tv, \"outfile\") is None:\n try:\n getattr(tv, \"album_dir\")\n except AttributeError:\n pass\n else:\n subdirs.add(tv.album_dir)\n subdirs.add(tv.album_dir.parent)\n files[i - 1] = {i: None}\n continue\n\n _path: Optional[Path] = Path(tv.outfile) if tv is not None else None\n # if the item never got turned into a track or video\n if _path is None:\n files[i - 1] = {i: None}\n continue\n\n # if the track or video didn't download\n if _path.exists():\n if _path.stat().st_size == 0:\n files[i - 1] = {i: None}\n continue\n else:\n files[i - 1] = {i: None}\n continue\n\n # otherwise, move files and clean up\n if isinstance(tv, Track):\n new_path: Path = self.playlist_dir / f\"{i:03d} - {tv.trackname}\"\n new_path.write_bytes(_path.read_bytes())\n _path.unlink()\n files[i - 1] = {i: str(new_path.absolute())}\n elif isinstance(tv, Video):\n new_path: Path = self.playlist_dir / f\"{i:03d} - {_path.name}\"\n new_path.write_bytes(_path.read_bytes())\n _path.unlink()\n files[i - 1] = {i: str(new_path.absolute())}\n else:\n self.files: List[Dict[int, Optional[str]]] = files\n\n # Find all subdirectories written to\n subdirs: Set[Path] = set()\n for tv in self.tracks_videos:\n if isinstance(tv, Track):\n try:\n getattr(tv, \"album_dir\")\n except AttributeError:\n pass\n else:\n subdirs.add(tv.album_dir)\n subdirs.add(tv.album_dir.parent)\n elif isinstance(tv, Video):\n subdirs.add(tv.artist_dir)\n\n # Copy all artist images, artist bio JSON files out\n # of subdirs\n artist_images: Set[Path] = set()\n for subdir in subdirs:\n for p in subdir.glob(\"*.jpg\"):\n if p.name == \"cover.jpg\":\n continue\n artist_images.add(p)\n else:\n for artist_image_path in artist_images:\n if artist_image_path.exists():\n shutil.copyfile(\n artist_image_path.absolute(),\n self.playlist_dir / artist_image_path.name,\n )\n\n artist_bios: Set[Path] = set()\n for subdir in 
subdirs:\n for p in subdir.glob(\"*bio.json\"):\n artist_bios.add(p)\n else:\n for artist_bio_path in artist_bios:\n if artist_bio_path.exists():\n shutil.copyfile(\n artist_bio_path.absolute(),\n self.playlist_dir / artist_bio_path.name,\n )\n\n # Remove all subdirs\n for subdir in subdirs:\n if subdir.exists():\n shutil.rmtree(subdir)\n else:\n return self.playlist_dir\n\n def craft_m3u8_text(self):\n \"\"\"This method creates a file called playlist.m3u8 in self.playlist_dir\n that is a standard M3U. Needs to be called after self.flatten_playlist_dir\n in order to be able to access self.files\n N.b. the already-written file is temporarily copied to a .mp4 version in a\n temporary directory because .m4a files cannot be read with mutagen.\"\"\"\n m3u_text: str = f\"#EXTM3U\\n#EXTENC:UTF-8\\n#EXTIMG:{str(self.cover_path.absolute())}\\n#PLAYLIST:{self.name}\\n\"\n\n logger.info(\n f\"Creating .m3u8 playlist file for Playlist with ID '{self.playlist_id}'\"\n )\n for d in self.files:\n file: str = next(iter(d.values()))\n if file is None:\n continue\n elif file.endswith(\".flac\"):\n m = mutagen.File(file)\n artist: str = m.get(\"artist\", [\"\"])[0]\n title: str = m.get(\"title\", [\"\"])[0]\n extinf: str = (\n f\"#EXTINF:{math.ceil(m.info.length)},\"\n f\"{artist} - {title}\\n{file}\\n\"\n )\n m3u_text += extinf\n elif file.endswith(\".mka\"):\n m = mutagen.File(file)\n artist: str = m.get(\"ARTI\", [\"\"])[0]\n title: str = m.get(\"TITL\", [\"\"])[0]\n extinf: str = (\n f\"#EXTINF:{math.ceil(m.info.length)},\"\n f\"{artist} - {title}\\n{file}\\n\"\n )\n m3u_text += extinf\n elif file.endswith(\".m4a\"):\n # Mutagen cannot read .m4a files, so make a copy with all\n # of the metadata tags as a .mp4 in a temporary directory\n with temporary_file(suffix=\".mp4\") as tf:\n ffmpeg.input(file, hide_banner=None, y=None).output(\n tf.name,\n acodec=\"copy\",\n vcodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n\n m = mutagen.File(tf.name)\n artist: str = m.get(\"\\xa9ART\", [\"\"])[0]\n title: str = m.get(\"\\xa9nam\", [\"\"])[0]\n extinf: str = (\n f\"#EXTINF:{math.ceil(m.info.length)},\"\n f\"{artist} - {title}\\n{file}\\n\"\n )\n m3u_text += extinf\n else:\n return m3u_text\n\n def dumps(self):\n return json.dumps(self.files)\n\n def dump(self, fp=sys.stdout):\n json.dump(self.files, fp)\n\n def get(self, session: Session, audio_format: AudioFormat, out_dir: Path):\n \"\"\"The main method of this class, executing a number of other methods\n in a row:\n - self.get_metadata()\n - self.set_items()\n - self.set_dir()\n - self.save_cover_image()\n - self.save_description()\n - self.get_items()\n - self.flatten_playlist_dir()\n \"\"\"\n self.get_metadata(session)\n \n if self.metadata is None:\n self.files = {}\n return\n \n self.set_items(session)\n self.set_dir(out_dir)\n self.save_cover_image(session, out_dir)\n try:\n self.save_description()\n except Exception:\n pass\n\n _get_items = self.get_items(session, audio_format)\n if _get_items is None:\n logger.critical(f\"Could not retrieve playlist with ID '{self.playlist_id}'\")\n return\n\n self.flatten_playlist_dir()\n\n try:\n m3u8_text: str = self.craft_m3u8_text()\n except Exception as e:\n logger.warning(\n \"Unable to create playlist.m3u8 file for \"\n f\"playlist with ID '{self.playlist_id}'\"\n )\n logger.debug(e)\n else:\n with open(self.playlist_dir / \"playlist.m3u8\", \"w\") as f:\n f.write(m3u8_text)\n\n logger.info(f\"Playlist files written to '{self.playlist_dir}'\")" }, { "identifier": "Track", "path": "tidal_wave/track.py", "snippet": 
"class Track:\n track_id: int\n\n def __post_init__(self):\n self._has_lyrics: Optional[bool] = None\n self.tags: dict = {}\n self.album_cover_saved: bool = False\n\n def get_metadata(self, session: Session):\n self.metadata: Optional[TracksEndpointResponseJSON] = request_tracks(\n session, self.track_id\n )\n\n def get_album(self, session: Session):\n self.album: Optional[AlbumsEndpointResponseJSON] = request_albums(\n session, self.metadata.album.id\n )\n\n def get_credits(self, session: Session):\n self.credits: Optional[TracksCreditsResponseJSON] = request_credits(\n session, self.track_id\n )\n\n def get_lyrics(self, session: Session):\n if self._has_lyrics is None:\n self.lyrics: Optional[TracksLyricsResponseJSON] = request_lyrics(\n session, self.track_id\n )\n if self.lyrics is None:\n self._has_lyrics = False\n else:\n self._has_lyrics = True\n else:\n return self.lyrics\n\n def get_stream(self, session: Session, audio_format: AudioFormat):\n \"\"\"Populates self.stream, self.manifest\"\"\"\n aq: Optional[str] = af_aq.get(audio_format)\n self.stream: Optional[TracksEndpointStreamResponseJSON] = request_stream(\n session, self.track_id, aq\n )\n\n def set_manifest(self):\n \"\"\"This method sets self.manifest and self.codec\"\"\"\n self.manifest: Manifest = manifester(self.stream)\n # https://dashif.org/codecs/audio/\n if self.manifest.codecs == \"flac\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mqa\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mha1\": # Sony 360 Reality Audio\n self.codec = \"mka\"\n elif self.manifest.codecs == \"mp4a.40.5\": # HE-AAC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.29\": # HE-AAC v2\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.2\": # AAC-LC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"eac3\": # Enhanced AC-3\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.34\": # MP3\n self.codec = \"mp3\"\n\n def set_album_dir(self, out_dir: Path):\n \"\"\"This method sets self.album_dir, based on self.album and\n out_dir. In particular, self.album_dir is a subdirectory of out_dir\n based on the name of the album's artist\"\"\"\n artist_substring: str = self.album.artist.name.replace(\"..\", \"\")\n album_substring: str = (\n f\"{self.album.name} \" f\"[{self.album.id}] [{self.album.release_date.year}]\"\n )\n self.album_dir: Path = out_dir / artist_substring / album_substring\n self.album_dir.mkdir(parents=True, exist_ok=True)\n\n if self.album.number_of_volumes > 1:\n volume_substring: str = f\"Volume {self.metadata.volume_number}\"\n (self.album_dir / volume_substring).mkdir(parents=True, exist_ok=True)\n\n def set_filename(self, audio_format: AudioFormat):\n \"\"\"This method sets self.filename. It's based on self.metadata\n as well as audio_format. 
Additionally, if the available codecs in\n self.manifest don't match audio_format, warnings are logged\"\"\"\n _track_part: str = f\"{self.metadata.track_number:02d} - {self.metadata.name}\"\n if audio_format == AudioFormat.low:\n track_substring: str = f\"{_track_part} [L]\"\n elif audio_format == AudioFormat.high:\n track_substring: str = f\"{_track_part} [H]\"\n elif audio_format == AudioFormat.lossless:\n track_substring: str = f\"{_track_part} [CD]\"\n elif audio_format == AudioFormat.mqa:\n track_substring: str = f\"{_track_part} [Q]\"\n elif audio_format == AudioFormat.hi_res:\n track_substring: str = f\"{_track_part} [HiRes]\"\n elif audio_format == AudioFormat.dolby_atmos:\n track_substring: str = f\"{_track_part} [A]\"\n elif audio_format == AudioFormat.sony_360_reality_audio:\n track_substring: str = f\"{_track_part} [360]\"\n else:\n track_substring: str = _track_part\n\n # Check for MQA masquerading as HiRes here\n if audio_format == AudioFormat.hi_res:\n if self.manifest.codecs == \"mqa\":\n logger.warning(\n \"Even though HiRes audio format was requested, this track is only \"\n \"available in MQA format. TIDAL regards this as 'HiRes' even though \"\n \"it is probably only lossless; i.e. 16-bit 44.1 kHz quality. \"\n \"Downloading of track will continue, but it will be marked as MQA.\"\n )\n self.filename: Optional[str] = f\"{_track_part} [Q].{self.codec}\"\n elif (self.stream.bit_depth == 16) and (self.stream.sample_rate == 44100):\n logger.warning(\n \"Even though HiRes audio format was requested, and TIDAL responded to \"\n \"that request without error, this track is only available in lossless \"\n \"format; i.e. 16-bit 44.1 kHz quality. Downloading of track will \"\n \"continue, but it will be marked as Lossless ([CD]).\"\n )\n self.filename: Optional[str] = f\"{_track_part} [CD].{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n\n # for use in playlist file ordering\n self.trackname: str = re.match(r\"(?:\\d{2,3} - )(.+?$)\", self.filename).groups()[\n 0\n ]\n\n def set_outfile(self):\n \"\"\"Uses self.album_dir and self.metadata and self.filename\n to craft the pathlib.Path object, self.outfile, that is a\n reference to where the track will be written on disk.\"\"\"\n if self.album.number_of_volumes > 1:\n self.outfile: Path = (\n self.album_dir / f\"Volume {self.metadata.volume_number}\" / self.filename\n )\n self.absolute_outfile = str(self.outfile.absolute())\n else:\n self.outfile: Path = self.album_dir / self.filename\n self.absolute_outfile = str(self.outfile.absolute())\n\n if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):\n logger.info(\n f\"Track {self.absolute_outfile} already exists \"\n \"and therefore will not be overwritten\"\n )\n return\n else:\n return self.outfile\n\n def save_artist_image(self, session: Session):\n \"\"\"This method writes a JPEG file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_image: Path = (\n self.album_dir / f\"{a.name.replace('..', '')}.jpg\"\n )\n if not track_artist_image.exists():\n download_artist_image(session, a, self.album_dir)\n\n def save_artist_bio(self, session: Session):\n \"\"\"This method writes a JSON file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_bio_json: Path = self.album_dir / f\"{a.name}-bio.json\"\n if not 
track_artist_bio_json.exists():\n artist_bio: Optional[ArtistsBioResponseJSON] = request_artist_bio(\n session, a.id\n )\n if artist_bio is not None:\n logger.info(\n f\"Writing artist bio for artist {a.id} to \"\n f\"'{str(track_artist_bio_json.absolute())}\"\n )\n track_artist_bio_json.write_text(artist_bio.to_json())\n\n def save_album_cover(self, session: Session):\n \"\"\"This method saves cover.jpg to self.album_dir; the bytes for cover.jpg\n come from self.album.cover\"\"\"\n self.cover_path: Path = self.album_dir / \"cover.jpg\"\n if (not self.cover_path.exists()) or (not self.album_cover_saved):\n download_cover_image(\n session=session, cover_uuid=self.album.cover, output_dir=self.album_dir\n )\n else:\n self.album_cover_saved = True\n\n def set_urls(self, session: Session):\n \"\"\"This method sets self.urls based on self.manifest\"\"\"\n if isinstance(self.manifest, JSONDASHManifest):\n self.urls: List[str] = self.manifest.urls\n elif isinstance(self.manifest, XMLDASHManifest):\n self.urls: List[str] = self.manifest.build_urls(session=session)\n self.download_headers: Dict[str, str] = {\"Accept\": self.manifest.mime_type}\n if session.session_id is not None:\n self.download_headers[\"sessionId\"] = session.session_id\n self.download_params = {k: None for k in session.params}\n\n def download_url(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method downloads self.urls[0], for use in situations when\n the manifest returned by TIDAL API contains one URL. It relies on\n byte range headers to incrementally get all content from a URL\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n # Implement HTTP range requests here to mimic official clients\n range_size: int = 1024 * 1024 # 1 MiB\n content_length: int = fetch_content_length(\n session=session, url=self.urls[0]\n )\n if content_length == 0:\n return\n\n range_headers: Iterable[str] = http_request_range_headers(\n content_length=content_length,\n range_size=range_size,\n return_tuple=False,\n )\n for rh in range_headers:\n with session.get(\n self.urls[0], params=self.download_params, headers={\"Range\": rh}\n ) as rr:\n if not rr.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(rr.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFMPEG to re-mux the audio bytes, otherwise\n # mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile,\n acodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(\n f\"Track {self.track_id} written to '{str(self.outfile.absolute())}'\"\n )\n return self.outfile\n\n def download_urls(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method writes the contents from self.urls to a temporary\n directory, then uses FFmpeg to re-mux the data to self.outfile\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n for u in self.urls:\n with session.get(\n url=u, headers=self.download_headers, params=self.download_params\n ) as resp:\n if not resp.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(resp.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFmpeg to re-mux the audio bytes, otherwise\n # 
mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile, acodec=\"copy\", loglevel=\"quiet\"\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(f\"Track {self.track_id} written to '{self.absolute_outfile}'\")\n return self.outfile\n\n def download(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method GETs the data from self.urls and writes it\n to self.outfile.\"\"\"\n if len(self.urls) == 1:\n outfile: Optional[Path] = self.download_url(\n session=session, out_dir=out_dir\n )\n else:\n outfile: Optional[Path] = self.download_urls(\n session=session, out_dir=out_dir\n )\n\n return outfile\n\n def craft_tags(self):\n \"\"\"Using the TAG_MAPPING dictionary,\n write the correct values of various metadata tags to the file.\n E.g. for .flac files, the album's artist is 'ALBUMARTIST',\n but for .m4a files, the album's artist is 'aART'.\"\"\"\n tags = dict()\n if (self.codec == \"flac\") or (self.codec == \"mka\"):\n tag_map = {k: v[\"flac\"] for k, v in TAG_MAPPING.items()}\n elif self.codec == \"m4a\":\n tag_map = {k: v[\"m4a\"] for k, v in TAG_MAPPING.items()}\n\n tags[tag_map[\"album\"]] = self.album.title\n tags[tag_map[\"album_artist\"]] = \";\".join((a.name for a in self.album.artists))\n tags[tag_map[\"album_peak_amplitude\"]] = f\"{self.stream.album_peak_amplitude}\"\n tags[tag_map[\"album_replay_gain\"]] = f\"{self.stream.album_replay_gain}\"\n tags[tag_map[\"artist\"]] = \";\".join((a.name for a in self.metadata.artists))\n tags[tag_map[\"artists\"]] = [a.name for a in self.metadata.artists]\n tags[tag_map[\"barcode\"]] = self.album.upc\n tags[tag_map[\"comment\"]] = self.metadata.url\n tags[tag_map[\"copyright\"]] = self.metadata.copyright\n tags[tag_map[\"date\"]] = str(self.album.release_date)\n tags[tag_map[\"isrc\"]] = self.metadata.isrc\n tags[tag_map[\"title\"]] = self.metadata.name\n tags[tag_map[\"track_peak_amplitude\"]] = f\"{self.metadata.peak}\"\n tags[tag_map[\"track_replay_gain\"]] = f\"{self.metadata.replay_gain}\"\n # credits\n for tag in {\"composer\", \"engineer\", \"lyricist\", \"mixer\", \"producer\", \"remixer\"}:\n try:\n _credits_tag = \";\".join(getattr(self.credits, tag))\n except (TypeError, AttributeError): # NoneType problems\n continue\n else:\n tags[tag_map[tag]] = _credits_tag\n # lyrics\n try:\n _lyrics = self.lyrics.subtitles\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[tag_map[\"lyrics\"]] = _lyrics\n\n if self.codec == \"flac\":\n # track and disk\n tags[\"DISCTOTAL\"] = f\"{self.album.number_of_volumes}\"\n tags[\"DISC\"] = f\"{self.metadata.volume_number}\"\n tags[\"TRACKTOTAL\"] = f\"{self.album.number_of_tracks}\"\n tags[\"TRACKNUMBER\"] = f\"{self.metadata.track_number}\"\n # instrument-specific\n # piano\n try:\n piano_credits: List[str] = [\n f\"{pc} (piano)\" for pc in self.credits.piano\n ]\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[\"PERFORMER\"] = piano_credits\n\n elif self.codec == \"m4a\":\n # Have to convert to bytes the values of the tags starting with '----'\n for k, v in tags.copy().items():\n if k.startswith(\"----\"):\n if isinstance(v, str):\n tags[k]: bytes = v.encode(\"UTF-8\")\n elif isinstance(v, list):\n tags[k]: List[bytes] = [s.encode(\"UTF-8\") for s in v]\n\n tags[\"trkn\"] = [(self.metadata.track_number, self.album.number_of_tracks)]\n 
tags[\"disk\"] = [(self.metadata.volume_number, self.album.number_of_volumes)]\n\n self.tags: dict = {k: v for k, v in tags.items() if v is not None}\n\n def set_tags(self):\n \"\"\"Instantiate a mutagen.File instance, add self.tags to it, and\n save it to disk\"\"\"\n self.mutagen = mutagen.File(self.outfile)\n self.mutagen.clear()\n self.mutagen.update(**self.tags)\n # add album cover\n if self.codec == \"flac\":\n p = mutagen.flac.Picture()\n p.type = mutagen.id3.PictureType.COVER_FRONT\n p.desc = \"Album Cover\"\n p.width = p.height = 1280\n p.mime = \"image/jpeg\"\n p.data = self.cover_path.read_bytes()\n self.mutagen.add_picture(p)\n elif self.codec == \"m4a\":\n self.mutagen[\"covr\"] = [\n MP4Cover(self.cover_path.read_bytes(), imageformat=MP4Cover.FORMAT_JPEG)\n ]\n\n self.mutagen.save()\n # Make sure audio track comes first because of\n # less-sophisticated audio players that only\n # recognize the first stream\n if self.codec == \"flac\":\n with temporary_file(suffix=\".mka\") as tf:\n shutil.move(str(self.outfile.absolute()), tf.name)\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{tf.name}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy\n -metadata:s:v title='Album cover' -metadata:s:v comment='Cover (front)'\n -disposition:v attached_pic \"{self.absolute_outfile}\" \"\"\"\n )\n subprocess.run(cmd)\n elif self.codec == \"m4a\":\n with temporary_file(suffix=\".mka\") as tf:\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{self.absolute_outfile}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy \"{tf.name}\" \"\"\"\n )\n subprocess.run(cmd)\n shutil.copyfile(tf.name, self.absolute_outfile)\n\n def get(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n metadata: Optional[TracksEndpointResponseJSON] = None,\n album: Optional[AlbumsEndpointResponseJSON] = None,\n ) -> Optional[str]:\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n\n if self.metadata is None:\n self.outfile = None\n return\n\n if \"DOLBY_ATMOS\" in self.metadata.media_metadata.tags:\n if audio_format != AudioFormat.dolby_atmos:\n logger.warning(\n f\"Track {self.track_id} is only available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if audio_format == AudioFormat.dolby_atmos:\n if \"DOLBY_ATMOS\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Dolby Atmos audio format was requested, but track \"\n f\"{self.track_id} is not available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.sony_360_reality_audio:\n if \"SONY_360RA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Sony 360 Reality Audio audio format was requested, but track \"\n f\"{self.track_id} is not available in Sony 360 Reality Audio \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.mqa:\n if \"MQA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"MQA audio format was requested, but track \"\n f\"{self.track_id} is not available in MQA audio \"\n \"format. 
Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if album is None:\n self.get_album(session)\n else:\n self.album = album\n\n if self.album is None:\n self.outfile = None\n return\n\n self.get_credits(session)\n self.get_stream(session, audio_format)\n if self.stream is None:\n return\n self.set_manifest()\n self.set_album_dir(out_dir)\n self.set_filename(audio_format)\n outfile: Optional[Path] = self.set_outfile()\n if outfile is None:\n return\n\n try:\n self.get_lyrics(session)\n except Exception:\n pass\n\n self.save_album_cover(session)\n\n try:\n self.save_artist_image(session)\n except Exception:\n pass\n\n try:\n self.save_artist_bio(session)\n except Exception:\n pass\n\n self.set_urls(session)\n\n if self.download(session, out_dir) is None:\n return\n\n self.craft_tags()\n self.set_tags()\n\n return str(self.outfile.absolute())\n\n def dump(self, fp=sys.stdout):\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dump({k: v}, fp)\n return None\n\n def dumps(self) -> str:\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dumps({k: v})\n return None" }, { "identifier": "Video", "path": "tidal_wave/video.py", "snippet": "class Video:\n video_id: int\n\n def __post_init__(self):\n self.tags: dict = {}\n self.codec: str = \"mp4\"\n\n def get_metadata(self, session: Session):\n \"\"\"Request from TIDAL API /videos endpoint\"\"\"\n self.metadata: Optional[VideosEndpointResponseJSON] = request_videos(\n session, self.video_id\n )\n\n def get_contributors(self, session: Session):\n \"\"\"Request from TIDAL API /videos/contributors endpoint\"\"\"\n self.contributors: Optional[\n VideosContributorsResponseJSON\n ] = request_video_contributors(session, self.video_id)\n\n def get_stream(self, session: Session, video_format=VideoFormat.high):\n \"\"\"Populates self.stream by requesting from TIDAL API\n /videos/playbackinfopostpaywall endpoint\"\"\"\n self.stream: Optional[VideosEndpointStreamResponseJSON] = request_video_stream(\n session, self.video_id, video_format.value\n )\n\n def get_m3u8(self, session: Session):\n \"\"\"This method sets self.m3u8, an m3u8.M3U8 object\n following the HTTP Live Streaming specification; parsed from\n self.stream. I.e., self.get_stream() needs to have been executed\n before calling this method. N.b. 
self.m3u8 almost certainly will\n be a multivariant playlist, meaning further processing of its\n contents will be necessary.\"\"\"\n self.m3u8: m3u8.Playlist = playlister(session=session, vesrj=self.stream)\n\n def set_urls(self):\n \"\"\"This method uses self.m3u8, an m3u8.M3U8 object that is variant:\n (https://developer.apple.com/documentation/http-live-streaming/creating-a-multivariant-playlist)\n It retrieves the highest-quality .m3u8 in its .playlists attribute,\n and sets self.urls as the list of strings from that m3u8.Playlist\"\"\"\n # for now, just get the highest-bandwidth playlist\n playlist: m3u8.Playlist = variant_streams(self.m3u8)\n self.M3U8 = m3u8.load(playlist.uri)\n if self.M3U8 is None or len(self.M3U8.files) == 0:\n raise TidalM3U8Exception(\n f\"HLS media segments are not available for video {self.video_id}\"\n )\n self.urls: List[str] = self.M3U8.files\n\n def set_artist_dir(self, out_dir: Path):\n \"\"\"Set self.artist_dir, which is the subdirectory of `out_dir`\n with name `self.metadata.artist.name`\"\"\"\n self.artist_dir: Path = out_dir / self.metadata.artist.name\n self.artist_dir.mkdir(parents=True, exist_ok=True)\n\n def set_filename(self, out_dir: Path):\n \"\"\"Set self.filename, which is constructed from self.metadata.name\n and self.stream.video_quality\"\"\"\n self.filename: str = (\n f\"{self.metadata.name} [{self.stream.video_quality}].{self.codec}\"\n )\n\n def set_outfile(self):\n \"\"\"Uses self.artist_dir and self.metadata and self.filename\n to craft the pathlib.Path object, self.outfile, that is a\n reference to where the track will be written on disk.\"\"\"\n self.outfile: Path = self.artist_dir / self.filename\n\n if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):\n logger.info(\n f\"Video {str(self.outfile.absolute())} already exists \"\n \"and therefore will not be overwritten\"\n )\n return\n else:\n return self.outfile\n\n def download(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"Requests the HLS video files that constitute self.video_id.\n Writes HLS bytes to a temporary file, then uses FFmpeg to write the\n video data to self.outfile\"\"\"\n if session.session_id is not None:\n download_headers: Dict[str, str] = {\"sessionId\": session.session_id}\n else:\n download_headers: dict = dict()\n download_params: Dict[str, None] = {k: None for k in session.params}\n # self.outfile should already have been set by self.set_outfile()\n logger.info(\n f\"Writing video {self.video_id} to '{str(self.outfile.absolute())}'\"\n )\n\n with temporary_file() as ntf:\n for u in self.urls:\n with session.get(\n url=u, headers=download_headers, params=download_params\n ) as download_response:\n if not download_response.ok:\n logger.warning(f\"Could not download {self}\")\n else:\n ntf.write(download_response.content)\n else:\n ntf.seek(0)\n\n # will always be .mp4 because HLS\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n str(self.outfile.absolute()),\n vcodec=\"copy\",\n acodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n\n logger.info(\n f\"Video {self.video_id} written to '{str(self.outfile.absolute())}'\"\n )\n return self.outfile\n\n def craft_tags(self):\n \"\"\"Using the TAG_MAPPING dictionary, write the correct values of\n various metadata tags to the file. 
Videos are .mp4\"\"\"\n tags = dict()\n tag_map = {k: v[\"m4a\"] for k, v in TAG_MAPPING.items()}\n\n tags[tag_map[\"artist\"]] = \";\".join((a.name for a in self.metadata.artists))\n tags[tag_map[\"artists\"]] = [a.name for a in self.metadata.artists]\n tags[tag_map[\"comment\"]] = f\"https://tidal.com/browse/video/{self.video_id}\"\n tags[tag_map[\"date\"]] = str(self.metadata.release_date.date())\n tags[tag_map[\"title\"]] = self.metadata.title\n\n for tag in {\"composer\", \"director\", \"lyricist\", \"producer\"}:\n try:\n _credits_tag = \";\".join(getattr(self.contributors, tag))\n except (TypeError, AttributeError): # NoneType problems\n continue\n else:\n tags[tag_map[tag]] = _credits_tag\n\n # Have to convert to bytes the values of the tags starting with '----'\n for k, v in tags.copy().items():\n if k.startswith(\"----\"):\n if isinstance(v, str):\n tags[k]: bytes = v.encode(\"UTF-8\")\n elif isinstance(v, list):\n tags[k]: List[bytes] = [s.encode(\"UTF-8\") for s in v]\n\n self.tags: dict = {k: v for k, v in tags.items() if v is not None}\n\n def set_tags(self):\n \"\"\"Instantiate a mutagen.File instance, add self.tags to it, and\n save it to disk\"\"\"\n self.mutagen = mutagen.File(self.outfile)\n self.mutagen.clear()\n self.mutagen.update(**self.tags)\n self.mutagen.save()\n\n def get(\n self,\n session: Session,\n out_dir: Path,\n metadata: Optional[\"VideosEndpointResponseJSON\"] = None,\n ) -> Optional[str]:\n \"\"\"The main method of this class. Executes a number of other methods\n in a row:\n - self.get_metadata()\n - self.get_contributors()\n - self.get_stream()\n - self.get_m3u8()\n - self.set_urls()\n - self.set_artist_dir()\n - self.set_filename()\n - self.set_outfile()\n - self.download()\n - self.craft_tags()\n - self.set_tags()\n \"\"\"\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n\n if self.metadata is None:\n return None\n\n self.get_contributors(session)\n self.get_stream(session)\n if self.stream is None:\n return None\n self.get_m3u8(session)\n self.set_urls()\n self.set_artist_dir(out_dir)\n self.set_filename(out_dir)\n outfile: Optional[Path] = self.set_outfile()\n if outfile is None:\n return None\n\n if self.download(session, out_dir) is None:\n return None\n\n self.craft_tags()\n self.set_tags()\n return str(self.outfile.absolute())\n\n def dump(self, fp=sys.stdout):\n json.dump({self.metadata.title: str(self.outfile.absolute())}, fp)\n\n def dumps(self) -> str:\n return json.dumps({self.metadata.title: str(self.outfile.absolute())})" }, { "identifier": "match_tidal_url", "path": "tidal_wave/models.py", "snippet": "def match_tidal_url(input_str: str) -> Optional[TidalResource]:\n \"\"\"Attempt to match the `input_str` to either the URL of a track or an\n album in the Tidal API service. Returns None if `input_str` matches\n neither, otherwise a subclass of TidalResource corresponding to the\n parsed input_str type\n \"\"\"\n resource_match: Optional[TidalResource] = None\n tidal_resources: Tuple[TidalResource] = (\n TidalTrack,\n TidalAlbum,\n TidalVideo,\n TidalPlaylist,\n TidalMix,\n TidalArtist,\n )\n for T in tidal_resources:\n try:\n resource_match = T(input_str)\n except ValueError as v:\n logger.debug(v)\n continue\n else:\n return resource_match" }, { "identifier": "TidalAlbum", "path": "tidal_wave/models.py", "snippet": "class TidalAlbum(TidalResource):\n \"\"\"Class representing a TIDAL album. 
Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?album/(\\d{5,9})(?:.*?)?\"\n )\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL album URL\")\n else:\n self.tidal_id = int(_id)\n logger.info(f\"TIDAL album ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalArtist", "path": "tidal_wave/models.py", "snippet": "class TidalArtist(TidalResource):\n \"\"\"Class representing a TIDAL artist. Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?artist/(\\d{7,9})(?:.*?)?\"\n )\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL album URL\")\n else:\n self.tidal_id = int(_id)\n logger.info(f\"TIDAL album ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalMix", "path": "tidal_wave/models.py", "snippet": "class TidalMix(TidalResource):\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?mix/(\\w{30})(?:.*?)?\"\n )\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL mix URL\")\n else:\n self.tidal_id = _id\n logger.info(f\"TIDAL mix ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalPlaylist", "path": "tidal_wave/models.py", "snippet": "class TidalPlaylist(TidalResource):\n \"\"\"Class representing a TIDAL playlist. Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?playlist/\"\n r\"([0-9a-f]{8}\\-[0-9a-f]{4}\\-4[0-9a-f]{3}\\-[89ab][0-9a-f]{3}\\-[0-9a-f]{12})(?:.*?)?\"\n )\n\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL playlist URL\")\n else:\n self.tidal_id = _id\n logger.info(f\"TIDAL playlist ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalTrack", "path": "tidal_wave/models.py", "snippet": "class TidalTrack(TidalResource):\n \"\"\"Class representing a TIDAL track. Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?(?:album/\\d{5,9}/)?track/(\\d{5,9})(?:.*?)?\"\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL track URL\")\n else:\n self.tidal_id = int(_id)\n logger.info(f\"TIDAL track ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalVideo", "path": "tidal_wave/models.py", "snippet": "class TidalVideo(TidalResource):\n \"\"\"Class representing a TIDAL video. Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?video/(\\d{7,9})(?:.*?)?\"\n )\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL video URL\")\n else:\n self.tidal_id = int(_id)\n logger.info(f\"TIDAL video ID parsed from input: {self.tidal_id}\")" } ]
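The snippet list for this record closes with the URL-matching helpers from tidal_wave/models.py. As a small illustration of how those pieces fit together (the URL below is made up, and the absolute import path is an assumption; only the class and function definitions shown above are relied on), match_tidal_url tries each resource wrapper in turn and returns the first one whose pattern accepts the input:

# Illustration only: the URL is a fabricated example, not a real TIDAL track,
# and the absolute import path is assumed rather than taken from this record.
from tidal_wave.models import TidalTrack, match_tidal_url

resource = match_tidal_url("https://listen.tidal.com/track/123456789")
if isinstance(resource, TidalTrack):
    # TidalTrack.__post_init__ has already validated and parsed the numeric ID.
    print(resource.tidal_id)  # -> 123456789
else:
    print("input did not match any known TIDAL resource URL")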
from contextlib import closing from pathlib import Path from typing import Optional, Union from .login import login, AudioFormat, LogLevel from .album import Album from .artist import Artist from .mix import Mix from .playlist import Playlist from .track import Track from .video import Video from .models import ( match_tidal_url, TidalAlbum, TidalArtist, TidalMix, TidalPlaylist, TidalTrack, TidalVideo, ) from platformdirs import user_music_path from typing_extensions import Annotated import logging import typer
17,218
app = typer.Typer() @app.command() def main( tidal_url: Annotated[ str, typer.Argument( help="The Tidal album or artist or mix or playlist or track or video to download" ), ], audio_format: Annotated[ AudioFormat, typer.Option(case_sensitive=False) ] = AudioFormat.lossless.value, output_directory: Annotated[ Path, typer.Argument( help="The parent directory under which directory(ies) of files will be written" ), ] = user_music_path(), loglevel: Annotated[ LogLevel, typer.Option(case_sensitive=False) ] = LogLevel.info.value, include_eps_singles: Annotated[ bool, typer.Option( "--include-eps-singles", help="No-op unless passing TIDAL artist. Whether to include artist's EPs and singles with albums", ), ] = False, ): logging.basicConfig( format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d:%H:%M:%S", level=logging.getLevelName(loglevel.value), ) logger = logging.getLogger(__name__) tidal_resource: Optional[ Union[TidalAlbum, TidalMix, TidalPlaylist, TidalTrack, TidalVideo] ] = match_tidal_url(tidal_url) if tidal_resource is None: logger.critical( f"Cannot parse '{tidal_url}' as a TIDAL album, artist, mix, playlist, track, or video URL" ) raise typer.Exit(code=1) s, audio_format = login(audio_format=audio_format) if s is None: raise typer.Exit(code=1) with closing(s) as session: if isinstance(tidal_resource, TidalTrack):
app = typer.Typer() @app.command() def main( tidal_url: Annotated[ str, typer.Argument( help="The Tidal album or artist or mix or playlist or track or video to download" ), ], audio_format: Annotated[ AudioFormat, typer.Option(case_sensitive=False) ] = AudioFormat.lossless.value, output_directory: Annotated[ Path, typer.Argument( help="The parent directory under which directory(ies) of files will be written" ), ] = user_music_path(), loglevel: Annotated[ LogLevel, typer.Option(case_sensitive=False) ] = LogLevel.info.value, include_eps_singles: Annotated[ bool, typer.Option( "--include-eps-singles", help="No-op unless passing TIDAL artist. Whether to include artist's EPs and singles with albums", ), ] = False, ): logging.basicConfig( format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d:%H:%M:%S", level=logging.getLevelName(loglevel.value), ) logger = logging.getLogger(__name__) tidal_resource: Optional[ Union[TidalAlbum, TidalMix, TidalPlaylist, TidalTrack, TidalVideo] ] = match_tidal_url(tidal_url) if tidal_resource is None: logger.critical( f"Cannot parse '{tidal_url}' as a TIDAL album, artist, mix, playlist, track, or video URL" ) raise typer.Exit(code=1) s, audio_format = login(audio_format=audio_format) if s is None: raise typer.Exit(code=1) with closing(s) as session: if isinstance(tidal_resource, TidalTrack):
track = Track(track_id=tidal_resource.tidal_id)
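The single line above is the expected continuation of the truncated main() shown earlier in this record: it opens the TidalTrack branch of the isinstance dispatch. Purely as a hedged sketch of where that dispatch plausibly leads (not the repository's actual code), the remaining branches could be written as below, using only the constructor fields and .get() signatures visible in the snippets; the absolute imports are an assumption, since the module itself uses the relative imports listed a few fields above.

# Sketch only -- not the actual body of the repository's main().
from pathlib import Path

from tidal_wave.album import Album
from tidal_wave.artist import Artist
from tidal_wave.login import AudioFormat
from tidal_wave.mix import Mix
from tidal_wave.models import (
    TidalAlbum,
    TidalArtist,
    TidalMix,
    TidalPlaylist,
    TidalTrack,
    TidalVideo,
)
from tidal_wave.playlist import Playlist
from tidal_wave.track import Track
from tidal_wave.video import Video


def dispatch(tidal_resource, session, audio_format: AudioFormat,
             output_directory: Path, include_eps_singles: bool) -> None:
    """Route a parsed TIDAL resource to the matching .get() driver method."""
    if isinstance(tidal_resource, TidalTrack):
        # The gold continuation line, followed by Track's driver call.
        track = Track(track_id=tidal_resource.tidal_id)
        track.get(session=session, audio_format=audio_format,
                  out_dir=output_directory)
    elif isinstance(tidal_resource, TidalAlbum):
        Album(album_id=tidal_resource.tidal_id).get(
            session=session, audio_format=audio_format, out_dir=output_directory)
    elif isinstance(tidal_resource, TidalVideo):
        Video(video_id=tidal_resource.tidal_id).get(
            session=session, out_dir=output_directory)
    elif isinstance(tidal_resource, TidalPlaylist):
        Playlist(playlist_id=tidal_resource.tidal_id).get(
            session=session, audio_format=audio_format, out_dir=output_directory)
    elif isinstance(tidal_resource, TidalMix):
        Mix(mix_id=tidal_resource.tidal_id).get(
            session=session, audio_format=audio_format, out_dir=output_directory)
    elif isinstance(tidal_resource, TidalArtist):
        Artist(artist_id=tidal_resource.tidal_id).get(
            session=session, audio_format=audio_format, out_dir=output_directory,
            include_eps_singles=include_eps_singles)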
7
2023-12-12 21:50:25+00:00
24k
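This value closes out the record: a truncated source file paired with the single line a model is expected to produce next, plus a handful of scalar bookkeeping values. As a hedged sketch of how such a pair could be consumed (the key names "prompt" and "target" are invented here, since the dump shows only values, not a schema), an exact-match scorer might look like:

# Key names are invented for illustration; only the values are taken from the
# record above (the target string is the gold continuation line).
def exact_match(record: dict, generated: str) -> bool:
    """True when the first non-empty generated line equals the target line."""
    target = record["target"].strip()
    for line in generated.splitlines():
        if line.strip():
            return line.strip() == target
    return False


example = {
    "prompt": "    with closing(s) as session:\n"
              "        if isinstance(tidal_resource, TidalTrack):\n",
    "target": "track = Track(track_id=tidal_resource.tidal_id)",
}
print(exact_match(example, "            track = Track(track_id=tidal_resource.tidal_id)"))  # True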
ZS-YANG/FemtoDet-v3
mmdet/configs/rtmdet/rtmdet_ins_s_8xb32_300e_coco.py
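The snippets that follow this config path cover three of mmdet's data-pipeline transforms: PackDetInputs, FilterAnnotations, and LoadAnnotations. As a rough, hedged sketch of how such transforms are commonly chained (this is not the content of rtmdet_ins_s_8xb32_300e_coco.py, and a real pipeline would also load, resize, and augment the image first), a training pipeline built from only these three pieces could look like:

# Rough sketch using only the transforms shown in the snippets below;
# NOT the actual RTMDet-Ins config referenced above.
from mmdet.datasets.transforms import (FilterAnnotations, LoadAnnotations,
                                        PackDetInputs)

train_pipeline = [
    # Image loading / resizing / flipping steps would normally precede these.
    dict(type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False),
    dict(type=FilterAnnotations, min_gt_bbox_wh=(1, 1), by_mask=True),
    dict(type=PackDetInputs),  # bundles the image tensor and DetDataSample
]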
[ { "identifier": "PackDetInputs", "path": "mmdet/datasets/transforms/formatting.py", "snippet": "class PackDetInputs(BaseTransform):\n \"\"\"Pack the inputs data for the detection / semantic segmentation /\n panoptic segmentation.\n\n The ``img_meta`` item is always populated. The contents of the\n ``img_meta`` dictionary depends on ``meta_keys``. By default this includes:\n\n - ``img_id``: id of the image\n\n - ``img_path``: path to the image file\n\n - ``ori_shape``: original shape of the image as a tuple (h, w)\n\n - ``img_shape``: shape of the image input to the network as a tuple \\\n (h, w). Note that images may be zero padded on the \\\n bottom/right if the batch tensor is larger than this shape.\n\n - ``scale_factor``: a float indicating the preprocessing scale\n\n - ``flip``: a boolean indicating if image flip transform was used\n\n - ``flip_direction``: the flipping direction\n\n Args:\n meta_keys (Sequence[str], optional): Meta keys to be converted to\n ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n Default: ``('img_id', 'img_path', 'ori_shape', 'img_shape',\n 'scale_factor', 'flip', 'flip_direction')``\n \"\"\"\n mapping_table = {\n 'gt_bboxes': 'bboxes',\n 'gt_bboxes_labels': 'labels',\n 'gt_masks': 'masks'\n }\n\n def __init__(self,\n meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n 'scale_factor', 'flip', 'flip_direction')):\n self.meta_keys = meta_keys\n\n def transform(self, results: dict) -> dict:\n \"\"\"Method to pack the input data.\n\n Args:\n results (dict): Result dict from the data pipeline.\n\n Returns:\n dict:\n\n - 'inputs' (obj:`torch.Tensor`): The forward data of models.\n - 'data_sample' (obj:`DetDataSample`): The annotation info of the\n sample.\n \"\"\"\n packed_results = dict()\n if 'img' in results:\n img = results['img']\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n # To improve the computational speed by by 3-5 times, apply:\n # If image is not contiguous, use\n # `numpy.transpose()` followed by `numpy.ascontiguousarray()`\n # If image is already contiguous, use\n # `torch.permute()` followed by `torch.contiguous()`\n # Refer to https://github.com/open-mmlab/mmdetection/pull/9533\n # for more details\n if not img.flags.c_contiguous:\n img = np.ascontiguousarray(img.transpose(2, 0, 1))\n img = to_tensor(img)\n else:\n img = to_tensor(img).permute(2, 0, 1).contiguous()\n\n packed_results['inputs'] = img\n\n if 'gt_ignore_flags' in results:\n valid_idx = np.where(results['gt_ignore_flags'] == 0)[0]\n ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0]\n\n data_sample = DetDataSample()\n instance_data = InstanceData()\n ignore_instance_data = InstanceData()\n\n for key in self.mapping_table.keys():\n if key not in results:\n continue\n if key == 'gt_masks' or isinstance(results[key], BaseBoxes):\n if 'gt_ignore_flags' in results:\n instance_data[\n self.mapping_table[key]] = results[key][valid_idx]\n ignore_instance_data[\n self.mapping_table[key]] = results[key][ignore_idx]\n else:\n instance_data[self.mapping_table[key]] = results[key]\n else:\n if 'gt_ignore_flags' in results:\n instance_data[self.mapping_table[key]] = to_tensor(\n results[key][valid_idx])\n ignore_instance_data[self.mapping_table[key]] = to_tensor(\n results[key][ignore_idx])\n else:\n instance_data[self.mapping_table[key]] = to_tensor(\n results[key])\n data_sample.gt_instances = instance_data\n data_sample.ignored_instances = ignore_instance_data\n\n if 'proposals' in results:\n proposals = InstanceData(\n 
bboxes=to_tensor(results['proposals']),\n scores=to_tensor(results['proposals_scores']))\n data_sample.proposals = proposals\n\n if 'gt_seg_map' in results:\n gt_sem_seg_data = dict(\n sem_seg=to_tensor(results['gt_seg_map'][None, ...].copy()))\n gt_sem_seg_data = PixelData(**gt_sem_seg_data)\n if 'ignore_index' in results:\n metainfo = dict(ignore_index=results['ignore_index'])\n gt_sem_seg_data.set_metainfo(metainfo)\n data_sample.gt_sem_seg = gt_sem_seg_data\n\n img_meta = {}\n for key in self.meta_keys:\n if key in results:\n img_meta[key] = results[key]\n data_sample.set_metainfo(img_meta)\n packed_results['data_samples'] = data_sample\n\n return packed_results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(meta_keys={self.meta_keys})'\n return repr_str" }, { "identifier": "FilterAnnotations", "path": "mmdet/datasets/transforms/loading.py", "snippet": "class FilterAnnotations(BaseTransform):\n \"\"\"Filter invalid annotations.\n\n Required Keys:\n\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_ignore_flags (bool) (optional)\n\n Modified Keys:\n\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_masks (optional)\n - gt_ignore_flags (optional)\n\n Args:\n min_gt_bbox_wh (tuple[float]): Minimum width and height of ground truth\n boxes. Default: (1., 1.)\n min_gt_mask_area (int): Minimum foreground area of ground truth masks.\n Default: 1\n by_box (bool): Filter instances with bounding boxes not meeting the\n min_gt_bbox_wh threshold. Default: True\n by_mask (bool): Filter instances with masks not meeting\n min_gt_mask_area threshold. Default: False\n keep_empty (bool): Whether to return None when it\n becomes an empty bbox after filtering. 
Defaults to True.\n \"\"\"\n\n def __init__(self,\n min_gt_bbox_wh: Tuple[int, int] = (1, 1),\n min_gt_mask_area: int = 1,\n by_box: bool = True,\n by_mask: bool = False,\n keep_empty: bool = True) -> None:\n # TODO: add more filter options\n assert by_box or by_mask\n self.min_gt_bbox_wh = min_gt_bbox_wh\n self.min_gt_mask_area = min_gt_mask_area\n self.by_box = by_box\n self.by_mask = by_mask\n self.keep_empty = keep_empty\n\n @autocast_box_type()\n def transform(self, results: dict) -> Union[dict, None]:\n \"\"\"Transform function to filter annotations.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n assert 'gt_bboxes' in results\n gt_bboxes = results['gt_bboxes']\n if gt_bboxes.shape[0] == 0:\n return results\n\n tests = []\n if self.by_box:\n tests.append(\n ((gt_bboxes.widths > self.min_gt_bbox_wh[0]) &\n (gt_bboxes.heights > self.min_gt_bbox_wh[1])).numpy())\n if self.by_mask:\n assert 'gt_masks' in results\n gt_masks = results['gt_masks']\n tests.append(gt_masks.areas >= self.min_gt_mask_area)\n\n keep = tests[0]\n for t in tests[1:]:\n keep = keep & t\n\n if not keep.any():\n if self.keep_empty:\n return None\n\n keys = ('gt_bboxes', 'gt_bboxes_labels', 'gt_masks', 'gt_ignore_flags')\n for key in keys:\n if key in results:\n results[key] = results[key][keep]\n\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(min_gt_bbox_wh={self.min_gt_bbox_wh}, ' \\\n f'keep_empty={self.keep_empty})'" }, { "identifier": "LoadAnnotations", "path": "mmdet/datasets/transforms/loading.py", "snippet": "class LoadAnnotations(MMCV_LoadAnnotations):\n \"\"\"Load and process the ``instances`` and ``seg_map`` annotation provided\n by dataset.\n\n The annotation format is as the following:\n\n .. code-block:: python\n\n {\n 'instances':\n [\n {\n # List of 4 numbers representing the bounding box of the\n # instance, in (x1, y1, x2, y2) order.\n 'bbox': [x1, y1, x2, y2],\n\n # Label of image classification.\n 'bbox_label': 1,\n\n # Used in instance/panoptic segmentation. The segmentation mask\n # of the instance or the information of segments.\n # 1. If list[list[float]], it represents a list of polygons,\n # one for each connected component of the object. Each\n # list[float] is one simple polygon in the format of\n # [x1, y1, ..., xn, yn] (n >= 3). The Xs and Ys are absolute\n # coordinates in unit of pixels.\n # 2. If dict, it represents the per-pixel segmentation mask in\n # COCO's compressed RLE format. The dict should have keys\n # “size” and “counts”. Can be loaded by pycocotools\n 'mask': list[list[float]] or dict,\n\n }\n ]\n # Filename of semantic or panoptic segmentation ground truth file.\n 'seg_map_path': 'a/b/c'\n }\n\n After this module, the annotation has been changed to the format below:\n\n .. code-block:: python\n\n {\n # In (x1, y1, x2, y2) order, float type. 
N is the number of bboxes\n # in an image\n 'gt_bboxes': BaseBoxes(N, 4)\n # In int type.\n 'gt_bboxes_labels': np.ndarray(N, )\n # In built-in class\n 'gt_masks': PolygonMasks (H, W) or BitmapMasks (H, W)\n # In uint8 type.\n 'gt_seg_map': np.ndarray (H, W)\n # in (x, y, v) order, float type.\n }\n\n Required Keys:\n\n - height\n - width\n - instances\n\n - bbox (optional)\n - bbox_label\n - mask (optional)\n - ignore_flag\n\n - seg_map_path (optional)\n\n Added Keys:\n\n - gt_bboxes (BaseBoxes[torch.float32])\n - gt_bboxes_labels (np.int64)\n - gt_masks (BitmapMasks | PolygonMasks)\n - gt_seg_map (np.uint8)\n - gt_ignore_flags (bool)\n\n Args:\n with_bbox (bool): Whether to parse and load the bbox annotation.\n Defaults to True.\n with_label (bool): Whether to parse and load the label annotation.\n Defaults to True.\n with_mask (bool): Whether to parse and load the mask annotation.\n Default: False.\n with_seg (bool): Whether to parse and load the semantic segmentation\n annotation. Defaults to False.\n poly2mask (bool): Whether to convert mask to bitmap. Default: True.\n box_type (str): The box type used to wrap the bboxes. If ``box_type``\n is None, gt_bboxes will keep being np.ndarray. Defaults to 'hbox'.\n reduce_zero_label (bool): Whether reduce all label value\n by 1. Usually used for datasets where 0 is background label.\n Defaults to False.\n ignore_index (int): The label index to be ignored.\n Valid only if reduce_zero_label is true. Defaults is 255.\n imdecode_backend (str): The image decoding backend type. The backend\n argument for :func:``mmcv.imfrombytes``.\n See :fun:``mmcv.imfrombytes`` for details.\n Defaults to 'cv2'.\n backend_args (dict, optional): Arguments to instantiate the\n corresponding backend. Defaults to None.\n \"\"\"\n\n def __init__(\n self,\n with_mask: bool = False,\n poly2mask: bool = True,\n box_type: str = 'hbox',\n # use for semseg\n reduce_zero_label: bool = False,\n ignore_index: int = 255,\n **kwargs) -> None:\n super(LoadAnnotations, self).__init__(**kwargs)\n self.with_mask = with_mask\n self.poly2mask = poly2mask\n self.box_type = box_type\n self.reduce_zero_label = reduce_zero_label\n self.ignore_index = ignore_index\n\n def _load_bboxes(self, results: dict) -> None:\n \"\"\"Private function to load bounding box annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n Returns:\n dict: The dict contains loaded bounding box annotations.\n \"\"\"\n gt_bboxes = []\n gt_ignore_flags = []\n for instance in results.get('instances', []):\n gt_bboxes.append(instance['bbox'])\n gt_ignore_flags.append(instance['ignore_flag'])\n if self.box_type is None:\n results['gt_bboxes'] = np.array(\n gt_bboxes, dtype=np.float32).reshape((-1, 4))\n else:\n _, box_type_cls = get_box_type(self.box_type)\n results['gt_bboxes'] = box_type_cls(gt_bboxes, dtype=torch.float32)\n results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n\n def _load_labels(self, results: dict) -> None:\n \"\"\"Private function to load label annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded label annotations.\n \"\"\"\n gt_bboxes_labels = []\n for instance in results.get('instances', []):\n gt_bboxes_labels.append(instance['bbox_label'])\n # TODO: Inconsistent with mmcv, consider how to deal with it later.\n results['gt_bboxes_labels'] = np.array(\n gt_bboxes_labels, dtype=np.int64)\n\n def _poly2mask(self, mask_ann: Union[list, dict], img_h: int,\n img_w: int) -> 
np.ndarray:\n \"\"\"Private function to convert masks represented with polygon to\n bitmaps.\n\n Args:\n mask_ann (list | dict): Polygon mask annotation input.\n img_h (int): The height of output mask.\n img_w (int): The width of output mask.\n\n Returns:\n np.ndarray: The decode bitmap mask of shape (img_h, img_w).\n \"\"\"\n\n if isinstance(mask_ann, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n rle = maskUtils.merge(rles)\n elif isinstance(mask_ann['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n else:\n # rle\n rle = mask_ann\n mask = maskUtils.decode(rle)\n return mask\n\n def _process_masks(self, results: dict) -> list:\n \"\"\"Process gt_masks and filter invalid polygons.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n list: Processed gt_masks.\n \"\"\"\n gt_masks = []\n gt_ignore_flags = []\n for instance in results.get('instances', []):\n gt_mask = instance['mask']\n # If the annotation of segmentation mask is invalid,\n # ignore the whole instance.\n if isinstance(gt_mask, list):\n gt_mask = [\n np.array(polygon) for polygon in gt_mask\n if len(polygon) % 2 == 0 and len(polygon) >= 6\n ]\n if len(gt_mask) == 0:\n # ignore this instance and set gt_mask to a fake mask\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n elif not self.poly2mask:\n # `PolygonMasks` requires a ploygon of format List[np.array],\n # other formats are invalid.\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n elif isinstance(gt_mask, dict) and \\\n not (gt_mask.get('counts') is not None and\n gt_mask.get('size') is not None and\n isinstance(gt_mask['counts'], (list, str))):\n # if gt_mask is a dict, it should include `counts` and `size`,\n # so that `BitmapMasks` can uncompressed RLE\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n gt_masks.append(gt_mask)\n # re-process gt_ignore_flags\n gt_ignore_flags.append(instance['ignore_flag'])\n results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n return gt_masks\n\n def _load_masks(self, results: dict) -> None:\n \"\"\"Private function to load mask annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n \"\"\"\n h, w = results['ori_shape']\n gt_masks = self._process_masks(results)\n if self.poly2mask:\n gt_masks = BitmapMasks(\n [self._poly2mask(mask, h, w) for mask in gt_masks], h, w)\n else:\n # fake polygon masks will be ignored in `PackDetInputs`\n gt_masks = PolygonMasks([mask for mask in gt_masks], h, w)\n results['gt_masks'] = gt_masks\n\n def _load_seg_map(self, results: dict) -> None:\n \"\"\"Private function to load semantic segmentation annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmcv.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded semantic segmentation annotations.\n \"\"\"\n if results.get('seg_map_path', None) is None:\n return\n\n img_bytes = get(\n results['seg_map_path'], backend_args=self.backend_args)\n gt_semantic_seg = mmcv.imfrombytes(\n img_bytes, flag='unchanged',\n backend=self.imdecode_backend).squeeze()\n\n if self.reduce_zero_label:\n # avoid using underflow conversion\n gt_semantic_seg[gt_semantic_seg == 0] = self.ignore_index\n gt_semantic_seg = gt_semantic_seg - 1\n gt_semantic_seg[gt_semantic_seg == self.ignore_index -\n 1] = self.ignore_index\n\n # modify if custom classes\n if results.get('label_map', None) is 
not None:\n # Add deep copy to solve bug of repeatedly\n # replace `gt_semantic_seg`, which is reported in\n # https://github.com/open-mmlab/mmsegmentation/pull/1445/\n gt_semantic_seg_copy = gt_semantic_seg.copy()\n for old_id, new_id in results['label_map'].items():\n gt_semantic_seg[gt_semantic_seg_copy == old_id] = new_id\n results['gt_seg_map'] = gt_semantic_seg\n results['ignore_index'] = self.ignore_index\n\n def transform(self, results: dict) -> dict:\n \"\"\"Function to load multiple types annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded bounding box, label and\n semantic segmentation.\n \"\"\"\n\n if self.with_bbox:\n self._load_bboxes(results)\n if self.with_label:\n self._load_labels(results)\n if self.with_mask:\n self._load_masks(results)\n if self.with_seg:\n self._load_seg_map(results)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(with_bbox={self.with_bbox}, '\n repr_str += f'with_label={self.with_label}, '\n repr_str += f'with_mask={self.with_mask}, '\n repr_str += f'with_seg={self.with_seg}, '\n repr_str += f'poly2mask={self.poly2mask}, '\n repr_str += f\"imdecode_backend='{self.imdecode_backend}', \"\n repr_str += f'backend_args={self.backend_args})'\n return repr_str" }, { "identifier": "CachedMixUp", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class CachedMixUp(BaseTransform):\n \"\"\"Cached mixup data augmentation.\n\n .. code:: text\n\n mixup transform\n +------------------------------+\n | mixup image | |\n | +--------|--------+ |\n | | | | |\n |---------------+ | |\n | | | |\n | | image | |\n | | | |\n | | | |\n | |-----------------+ |\n | pad |\n +------------------------------+\n\n The cached mixup transform steps are as follows:\n\n 1. Append the results from the last transform into the cache.\n 2. Another random image is picked from the cache and embedded in\n the top left patch(after padding and resizing)\n 3. The target of mixup transform is the weighted average of mixup\n image and origin image.\n\n Required Keys:\n\n - img\n - gt_bboxes (np.float32) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_ignore_flags (bool) (optional)\n - mix_results (List[dict])\n\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_ignore_flags (optional)\n\n\n Args:\n img_scale (Sequence[int]): Image output size after mixup pipeline.\n The shape order should be (width, height). Defaults to (640, 640).\n ratio_range (Sequence[float]): Scale ratio of mixup image.\n Defaults to (0.5, 1.5).\n flip_ratio (float): Horizontal flip ratio of mixup image.\n Defaults to 0.5.\n pad_val (int): Pad value. Defaults to 114.\n max_iters (int): The maximum number of iterations. If the number of\n iterations is greater than `max_iters`, but gt_bbox is still\n empty, then the iteration is terminated. Defaults to 15.\n bbox_clip_border (bool, optional): Whether to clip the objects outside\n the border of the image. In some dataset like MOT17, the gt bboxes\n are allowed to cross the border of images. Therefore, we don't\n need to clip the gt bboxes in these cases. Defaults to True.\n max_cached_images (int): The maximum length of the cache. The larger\n the cache, the stronger the randomness of this transform. As a\n rule of thumb, providing 10 caches for each image suffices for\n randomness. 
Defaults to 20.\n random_pop (bool): Whether to randomly pop a result from the cache\n when the cache is full. If set to False, use FIFO popping method.\n Defaults to True.\n prob (float): Probability of applying this transformation.\n Defaults to 1.0.\n \"\"\"\n\n def __init__(self,\n img_scale: Tuple[int, int] = (640, 640),\n ratio_range: Tuple[float, float] = (0.5, 1.5),\n flip_ratio: float = 0.5,\n pad_val: float = 114.0,\n max_iters: int = 15,\n bbox_clip_border: bool = True,\n max_cached_images: int = 20,\n random_pop: bool = True,\n prob: float = 1.0) -> None:\n assert isinstance(img_scale, tuple)\n assert max_cached_images >= 2, 'The length of cache must >= 2, ' \\\n f'but got {max_cached_images}.'\n assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \\\n f'got {prob}.'\n self.dynamic_scale = img_scale\n self.ratio_range = ratio_range\n self.flip_ratio = flip_ratio\n self.pad_val = pad_val\n self.max_iters = max_iters\n self.bbox_clip_border = bbox_clip_border\n self.results_cache = []\n\n self.max_cached_images = max_cached_images\n self.random_pop = random_pop\n self.prob = prob\n\n @cache_randomness\n def get_indexes(self, cache: list) -> int:\n \"\"\"Call function to collect indexes.\n\n Args:\n cache (list): The result cache.\n\n Returns:\n int: index.\n \"\"\"\n\n for i in range(self.max_iters):\n index = random.randint(0, len(cache) - 1)\n gt_bboxes_i = cache[index]['gt_bboxes']\n if len(gt_bboxes_i) != 0:\n break\n return index\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"MixUp transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n # cache and pop images\n self.results_cache.append(copy.deepcopy(results))\n if len(self.results_cache) > self.max_cached_images:\n if self.random_pop:\n index = random.randint(0, len(self.results_cache) - 1)\n else:\n index = 0\n self.results_cache.pop(index)\n\n if len(self.results_cache) <= 1:\n return results\n\n if random.uniform(0, 1) > self.prob:\n return results\n\n index = self.get_indexes(self.results_cache)\n retrieve_results = copy.deepcopy(self.results_cache[index])\n\n # TODO: refactor mixup to reuse these code.\n if retrieve_results['gt_bboxes'].shape[0] == 0:\n # empty bbox\n return results\n\n retrieve_img = retrieve_results['img']\n with_mask = True if 'gt_masks' in results else False\n\n jit_factor = random.uniform(*self.ratio_range)\n is_flip = random.uniform(0, 1) > self.flip_ratio\n\n if len(retrieve_img.shape) == 3:\n out_img = np.ones(\n (self.dynamic_scale[1], self.dynamic_scale[0], 3),\n dtype=retrieve_img.dtype) * self.pad_val\n else:\n out_img = np.ones(\n self.dynamic_scale[::-1],\n dtype=retrieve_img.dtype) * self.pad_val\n\n # 1. keep_ratio resize\n scale_ratio = min(self.dynamic_scale[1] / retrieve_img.shape[0],\n self.dynamic_scale[0] / retrieve_img.shape[1])\n retrieve_img = mmcv.imresize(\n retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),\n int(retrieve_img.shape[0] * scale_ratio)))\n\n # 2. paste\n out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img\n\n # 3. scale jit\n scale_ratio *= jit_factor\n out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),\n int(out_img.shape[0] * jit_factor)))\n\n # 4. flip\n if is_flip:\n out_img = out_img[:, ::-1, :]\n\n # 5. 
random crop\n ori_img = results['img']\n origin_h, origin_w = out_img.shape[:2]\n target_h, target_w = ori_img.shape[:2]\n padded_img = np.ones((max(origin_h, target_h), max(\n origin_w, target_w), 3)) * self.pad_val\n padded_img = padded_img.astype(np.uint8)\n padded_img[:origin_h, :origin_w] = out_img\n\n x_offset, y_offset = 0, 0\n if padded_img.shape[0] > target_h:\n y_offset = random.randint(0, padded_img.shape[0] - target_h)\n if padded_img.shape[1] > target_w:\n x_offset = random.randint(0, padded_img.shape[1] - target_w)\n padded_cropped_img = padded_img[y_offset:y_offset + target_h,\n x_offset:x_offset + target_w]\n\n # 6. adjust bbox\n retrieve_gt_bboxes = retrieve_results['gt_bboxes']\n retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio])\n if with_mask:\n retrieve_gt_masks = retrieve_results['gt_masks'].rescale(\n scale_ratio)\n\n if self.bbox_clip_border:\n retrieve_gt_bboxes.clip_([origin_h, origin_w])\n\n if is_flip:\n retrieve_gt_bboxes.flip_([origin_h, origin_w],\n direction='horizontal')\n if with_mask:\n retrieve_gt_masks = retrieve_gt_masks.flip()\n\n # 7. filter\n cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone()\n cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset])\n if with_mask:\n retrieve_gt_masks = retrieve_gt_masks.translate(\n out_shape=(target_h, target_w),\n offset=-x_offset,\n direction='horizontal')\n retrieve_gt_masks = retrieve_gt_masks.translate(\n out_shape=(target_h, target_w),\n offset=-y_offset,\n direction='vertical')\n\n if self.bbox_clip_border:\n cp_retrieve_gt_bboxes.clip_([target_h, target_w])\n\n # 8. mix up\n ori_img = ori_img.astype(np.float32)\n mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32)\n\n retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels']\n retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags']\n\n mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat(\n (results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0)\n mixup_gt_bboxes_labels = np.concatenate(\n (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0)\n mixup_gt_ignore_flags = np.concatenate(\n (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0)\n if with_mask:\n mixup_gt_masks = retrieve_gt_masks.cat(\n [results['gt_masks'], retrieve_gt_masks])\n\n # remove outside bbox\n inside_inds = mixup_gt_bboxes.is_inside([target_h, target_w]).numpy()\n mixup_gt_bboxes = mixup_gt_bboxes[inside_inds]\n mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds]\n mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds]\n if with_mask:\n mixup_gt_masks = mixup_gt_masks[inside_inds]\n\n results['img'] = mixup_img.astype(np.uint8)\n results['img_shape'] = mixup_img.shape[:2]\n results['gt_bboxes'] = mixup_gt_bboxes\n results['gt_bboxes_labels'] = mixup_gt_bboxes_labels\n results['gt_ignore_flags'] = mixup_gt_ignore_flags\n if with_mask:\n results['gt_masks'] = mixup_gt_masks\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(dynamic_scale={self.dynamic_scale}, '\n repr_str += f'ratio_range={self.ratio_range}, '\n repr_str += f'flip_ratio={self.flip_ratio}, '\n repr_str += f'pad_val={self.pad_val}, '\n repr_str += f'max_iters={self.max_iters}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border}, '\n repr_str += f'max_cached_images={self.max_cached_images}, '\n repr_str += f'random_pop={self.random_pop}, '\n repr_str += f'prob={self.prob})'\n return repr_str" }, { "identifier": "CachedMosaic", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class 
CachedMosaic(Mosaic):\n \"\"\"Cached mosaic augmentation.\n\n Cached mosaic transform will random select images from the cache\n and combine them into one output image.\n\n .. code:: text\n\n mosaic transform\n center_x\n +------------------------------+\n | pad | pad |\n | +-----------+ |\n | | | |\n | | image1 |--------+ |\n | | | | |\n | | | image2 | |\n center_y |----+-------------+-----------|\n | | cropped | |\n |pad | image3 | image4 |\n | | | |\n +----|-------------+-----------+\n | |\n +-------------+\n\n The cached mosaic transform steps are as follows:\n\n 1. Append the results from the last transform into the cache.\n 2. Choose the mosaic center as the intersections of 4 images\n 3. Get the left top image according to the index, and randomly\n sample another 3 images from the result cache.\n 4. Sub image will be cropped if image is larger than mosaic patch\n\n Required Keys:\n\n - img\n - gt_bboxes (np.float32) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_ignore_flags (bool) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_ignore_flags (optional)\n\n Args:\n img_scale (Sequence[int]): Image size before mosaic pipeline of single\n image. The shape order should be (width, height).\n Defaults to (640, 640).\n center_ratio_range (Sequence[float]): Center ratio range of mosaic\n output. Defaults to (0.5, 1.5).\n bbox_clip_border (bool, optional): Whether to clip the objects outside\n the border of the image. In some dataset like MOT17, the gt bboxes\n are allowed to cross the border of images. Therefore, we don't\n need to clip the gt bboxes in these cases. Defaults to True.\n pad_val (int): Pad value. Defaults to 114.\n prob (float): Probability of applying this transformation.\n Defaults to 1.0.\n max_cached_images (int): The maximum length of the cache. The larger\n the cache, the stronger the randomness of this transform. As a\n rule of thumb, providing 10 caches for each image suffices for\n randomness. Defaults to 40.\n random_pop (bool): Whether to randomly pop a result from the cache\n when the cache is full. 
If set to False, use FIFO popping method.\n Defaults to True.\n \"\"\"\n\n def __init__(self,\n *args,\n max_cached_images: int = 40,\n random_pop: bool = True,\n **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.results_cache = []\n self.random_pop = random_pop\n assert max_cached_images >= 4, 'The length of cache must >= 4, ' \\\n f'but got {max_cached_images}.'\n self.max_cached_images = max_cached_images\n\n @cache_randomness\n def get_indexes(self, cache: list) -> list:\n \"\"\"Call function to collect indexes.\n\n Args:\n cache (list): The results cache.\n\n Returns:\n list: indexes.\n \"\"\"\n\n indexes = [random.randint(0, len(cache) - 1) for _ in range(3)]\n return indexes\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"Mosaic transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n # cache and pop images\n self.results_cache.append(copy.deepcopy(results))\n if len(self.results_cache) > self.max_cached_images:\n if self.random_pop:\n index = random.randint(0, len(self.results_cache) - 1)\n else:\n index = 0\n self.results_cache.pop(index)\n\n if len(self.results_cache) <= 4:\n return results\n\n if random.uniform(0, 1) > self.prob:\n return results\n indices = self.get_indexes(self.results_cache)\n mix_results = [copy.deepcopy(self.results_cache[i]) for i in indices]\n\n # TODO: refactor mosaic to reuse these code.\n mosaic_bboxes = []\n mosaic_bboxes_labels = []\n mosaic_ignore_flags = []\n mosaic_masks = []\n with_mask = True if 'gt_masks' in results else False\n\n if len(results['img'].shape) == 3:\n mosaic_img = np.full(\n (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2), 3),\n self.pad_val,\n dtype=results['img'].dtype)\n else:\n mosaic_img = np.full(\n (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2)),\n self.pad_val,\n dtype=results['img'].dtype)\n\n # mosaic center x, y\n center_x = int(\n random.uniform(*self.center_ratio_range) * self.img_scale[0])\n center_y = int(\n random.uniform(*self.center_ratio_range) * self.img_scale[1])\n center_position = (center_x, center_y)\n\n loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n for i, loc in enumerate(loc_strs):\n if loc == 'top_left':\n results_patch = copy.deepcopy(results)\n else:\n results_patch = copy.deepcopy(mix_results[i - 1])\n\n img_i = results_patch['img']\n h_i, w_i = img_i.shape[:2]\n # keep_ratio resize\n scale_ratio_i = min(self.img_scale[1] / h_i,\n self.img_scale[0] / w_i)\n img_i = mmcv.imresize(\n img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))\n\n # compute the combine parameters\n paste_coord, crop_coord = self._mosaic_combine(\n loc, center_position, img_i.shape[:2][::-1])\n x1_p, y1_p, x2_p, y2_p = paste_coord\n x1_c, y1_c, x2_c, y2_c = crop_coord\n\n # crop and paste image\n mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]\n\n # adjust coordinate\n gt_bboxes_i = results_patch['gt_bboxes']\n gt_bboxes_labels_i = results_patch['gt_bboxes_labels']\n gt_ignore_flags_i = results_patch['gt_ignore_flags']\n\n padw = x1_p - x1_c\n padh = y1_p - y1_c\n gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i])\n gt_bboxes_i.translate_([padw, padh])\n mosaic_bboxes.append(gt_bboxes_i)\n mosaic_bboxes_labels.append(gt_bboxes_labels_i)\n mosaic_ignore_flags.append(gt_ignore_flags_i)\n if with_mask and results_patch.get('gt_masks', None) is not None:\n gt_masks_i = results_patch['gt_masks']\n gt_masks_i = gt_masks_i.rescale(float(scale_ratio_i))\n gt_masks_i = 
gt_masks_i.translate(\n out_shape=(int(self.img_scale[0] * 2),\n int(self.img_scale[1] * 2)),\n offset=padw,\n direction='horizontal')\n gt_masks_i = gt_masks_i.translate(\n out_shape=(int(self.img_scale[0] * 2),\n int(self.img_scale[1] * 2)),\n offset=padh,\n direction='vertical')\n mosaic_masks.append(gt_masks_i)\n\n mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0)\n mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0)\n mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0)\n\n if self.bbox_clip_border:\n mosaic_bboxes.clip_([2 * self.img_scale[1], 2 * self.img_scale[0]])\n # remove outside bboxes\n inside_inds = mosaic_bboxes.is_inside(\n [2 * self.img_scale[1], 2 * self.img_scale[0]]).numpy()\n mosaic_bboxes = mosaic_bboxes[inside_inds]\n mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds]\n mosaic_ignore_flags = mosaic_ignore_flags[inside_inds]\n\n results['img'] = mosaic_img\n results['img_shape'] = mosaic_img.shape[:2]\n results['gt_bboxes'] = mosaic_bboxes\n results['gt_bboxes_labels'] = mosaic_bboxes_labels\n results['gt_ignore_flags'] = mosaic_ignore_flags\n\n if with_mask:\n mosaic_masks = mosaic_masks[0].cat(mosaic_masks)\n results['gt_masks'] = mosaic_masks[inside_inds]\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(img_scale={self.img_scale}, '\n repr_str += f'center_ratio_range={self.center_ratio_range}, '\n repr_str += f'pad_val={self.pad_val}, '\n repr_str += f'prob={self.prob}, '\n repr_str += f'max_cached_images={self.max_cached_images}, '\n repr_str += f'random_pop={self.random_pop})'\n return repr_str" }, { "identifier": "Pad", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class Pad(MMCV_Pad):\n \"\"\"Pad the image & segmentation map.\n\n There are three padding modes: (1) pad to a fixed size and (2) pad to the\n minimum size that is divisible by some number. and (3)pad to square. Also,\n pad to square and pad to the minimum size can be used as the same time.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_masks\n - gt_seg_map\n\n Added Keys:\n\n - pad_shape\n - pad_fixed_size\n - pad_size_divisor\n\n Args:\n size (tuple, optional): Fixed padding size.\n Expected padding shape (width, height). Defaults to None.\n size_divisor (int, optional): The divisor of padded size. Defaults to\n None.\n pad_to_square (bool): Whether to pad the image into a square.\n Currently only used for YOLOX. Defaults to False.\n pad_val (Number | dict[str, Number], optional) - Padding value for if\n the pad_mode is \"constant\". If it is a single number, the value\n to pad the image is the number and to pad the semantic\n segmentation map is 255. If it is a dict, it should have the\n following keys:\n\n - img: The value to pad the image.\n - seg: The value to pad the semantic segmentation map.\n Defaults to dict(img=0, seg=255).\n padding_mode (str): Type of padding. Should be: constant, edge,\n reflect or symmetric. Defaults to 'constant'.\n\n - constant: pads with a constant value, this value is specified\n with pad_val.\n - edge: pads with the last value at the edge of the image.\n - reflect: pads with reflection of image without repeating the last\n value on the edge. 
For example, padding [1, 2, 3, 4] with 2\n elements on both sides in reflect mode will result in\n [3, 2, 1, 2, 3, 4, 3, 2].\n - symmetric: pads with reflection of image repeating the last value\n on the edge. For example, padding [1, 2, 3, 4] with 2 elements on\n both sides in symmetric mode will result in\n [2, 1, 1, 2, 3, 4, 4, 3]\n \"\"\"\n\n def _pad_masks(self, results: dict) -> None:\n \"\"\"Pad masks according to ``results['pad_shape']``.\"\"\"\n if results.get('gt_masks', None) is not None:\n pad_val = self.pad_val.get('masks', 0)\n pad_shape = results['pad_shape'][:2]\n results['gt_masks'] = results['gt_masks'].pad(\n pad_shape, pad_val=pad_val)\n\n def transform(self, results: dict) -> dict:\n \"\"\"Call function to pad images, masks, semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n self._pad_img(results)\n self._pad_seg(results)\n self._pad_masks(results)\n return results" }, { "identifier": "RandomCrop", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class RandomCrop(BaseTransform):\n \"\"\"Random crop the image & bboxes & masks.\n\n The absolute ``crop_size`` is sampled based on ``crop_type`` and\n ``image_size``, then the cropped results are generated.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_ignore_flags (bool) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_masks (optional)\n - gt_ignore_flags (optional)\n - gt_seg_map (optional)\n - gt_instances_ids (options, only used in MOT/VIS)\n\n Added Keys:\n\n - homography_matrix\n\n Args:\n crop_size (tuple): The relative ratio or absolute pixels of\n (width, height).\n crop_type (str, optional): One of \"relative_range\", \"relative\",\n \"absolute\", \"absolute_range\". \"relative\" randomly crops\n (h * crop_size[0], w * crop_size[1]) part from an input of size\n (h, w). \"relative_range\" uniformly samples relative crop size from\n range [crop_size[0], 1] and [crop_size[1], 1] for height and width\n respectively. \"absolute\" crops from an input with absolute size\n (crop_size[0], crop_size[1]). \"absolute_range\" uniformly samples\n crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w\n in range [crop_size[0], min(w, crop_size[1])].\n Defaults to \"absolute\".\n allow_negative_crop (bool, optional): Whether to allow a crop that does\n not contain any bbox area. Defaults to False.\n recompute_bbox (bool, optional): Whether to re-compute the boxes based\n on cropped instance masks. Defaults to False.\n bbox_clip_border (bool, optional): Whether clip the objects outside\n the border of the image. Defaults to True.\n\n Note:\n - If the image is smaller than the absolute crop size, return the\n original image.\n - The keys for bboxes, labels and masks must be aligned. 
That is,\n ``gt_bboxes`` corresponds to ``gt_labels`` and ``gt_masks``, and\n ``gt_bboxes_ignore`` corresponds to ``gt_labels_ignore`` and\n ``gt_masks_ignore``.\n - If the crop does not contain any gt-bbox region and\n ``allow_negative_crop`` is set to False, skip this image.\n \"\"\"\n\n def __init__(self,\n crop_size: tuple,\n crop_type: str = 'absolute',\n allow_negative_crop: bool = False,\n recompute_bbox: bool = False,\n bbox_clip_border: bool = True) -> None:\n if crop_type not in [\n 'relative_range', 'relative', 'absolute', 'absolute_range'\n ]:\n raise ValueError(f'Invalid crop_type {crop_type}.')\n if crop_type in ['absolute', 'absolute_range']:\n assert crop_size[0] > 0 and crop_size[1] > 0\n assert isinstance(crop_size[0], int) and isinstance(\n crop_size[1], int)\n if crop_type == 'absolute_range':\n assert crop_size[0] <= crop_size[1]\n else:\n assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1\n self.crop_size = crop_size\n self.crop_type = crop_type\n self.allow_negative_crop = allow_negative_crop\n self.bbox_clip_border = bbox_clip_border\n self.recompute_bbox = recompute_bbox\n\n def _crop_data(self, results: dict, crop_size: Tuple[int, int],\n allow_negative_crop: bool) -> Union[dict, None]:\n \"\"\"Function to randomly crop images, bounding boxes, masks, semantic\n segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n crop_size (Tuple[int, int]): Expected absolute size after\n cropping, (h, w).\n allow_negative_crop (bool): Whether to allow a crop that does not\n contain any bbox area.\n\n Returns:\n results (Union[dict, None]): Randomly cropped results, 'img_shape'\n key in result dict is updated according to crop size. None will\n be returned when there is no valid bbox after cropping.\n \"\"\"\n assert crop_size[0] > 0 and crop_size[1] > 0\n img = results['img']\n margin_h = max(img.shape[0] - crop_size[0], 0)\n margin_w = max(img.shape[1] - crop_size[1], 0)\n offset_h, offset_w = self._rand_offset((margin_h, margin_w))\n crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]\n crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]\n\n # Record the homography matrix for the RandomCrop\n homography_matrix = np.array(\n [[1, 0, -offset_w], [0, 1, -offset_h], [0, 0, 1]],\n dtype=np.float32)\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n # crop the image\n img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]\n img_shape = img.shape\n results['img'] = img\n results['img_shape'] = img_shape[:2]\n\n # crop bboxes accordingly and clip to the image boundary\n if results.get('gt_bboxes', None) is not None:\n bboxes = results['gt_bboxes']\n bboxes.translate_([-offset_w, -offset_h])\n if self.bbox_clip_border:\n bboxes.clip_(img_shape[:2])\n valid_inds = bboxes.is_inside(img_shape[:2]).numpy()\n # If the crop does not contain any gt-bbox area and\n # allow_negative_crop is False, skip this image.\n if (not valid_inds.any() and not allow_negative_crop):\n return None\n\n results['gt_bboxes'] = bboxes[valid_inds]\n\n if results.get('gt_ignore_flags', None) is not None:\n results['gt_ignore_flags'] = \\\n results['gt_ignore_flags'][valid_inds]\n\n if results.get('gt_bboxes_labels', None) is not None:\n results['gt_bboxes_labels'] = \\\n results['gt_bboxes_labels'][valid_inds]\n\n if results.get('gt_masks', None) is not None:\n results['gt_masks'] = results['gt_masks'][\n valid_inds.nonzero()[0]].crop(\n 
np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))\n if self.recompute_bbox:\n results['gt_bboxes'] = results['gt_masks'].get_bboxes(\n type(results['gt_bboxes']))\n\n # We should remove the instance ids corresponding to invalid boxes.\n if results.get('gt_instances_ids', None) is not None:\n results['gt_instances_ids'] = \\\n results['gt_instances_ids'][valid_inds]\n\n # crop semantic seg\n if results.get('gt_seg_map', None) is not None:\n results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2,\n crop_x1:crop_x2]\n\n return results\n\n @cache_randomness\n def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"Randomly generate crop offset.\n\n Args:\n margin (Tuple[int, int]): The upper bound for the offset generated\n randomly.\n\n Returns:\n Tuple[int, int]: The random offset for the crop.\n \"\"\"\n margin_h, margin_w = margin\n offset_h = np.random.randint(0, margin_h + 1)\n offset_w = np.random.randint(0, margin_w + 1)\n\n return offset_h, offset_w\n\n @cache_randomness\n def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"Randomly generates the absolute crop size based on `crop_type` and\n `image_size`.\n\n Args:\n image_size (Tuple[int, int]): (h, w).\n\n Returns:\n crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels.\n \"\"\"\n h, w = image_size\n if self.crop_type == 'absolute':\n return min(self.crop_size[1], h), min(self.crop_size[0], w)\n elif self.crop_type == 'absolute_range':\n crop_h = np.random.randint(\n min(h, self.crop_size[0]),\n min(h, self.crop_size[1]) + 1)\n crop_w = np.random.randint(\n min(w, self.crop_size[0]),\n min(w, self.crop_size[1]) + 1)\n return crop_h, crop_w\n elif self.crop_type == 'relative':\n crop_w, crop_h = self.crop_size\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n else:\n # 'relative_range'\n crop_size = np.asarray(self.crop_size, dtype=np.float32)\n crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n\n @autocast_box_type()\n def transform(self, results: dict) -> Union[dict, None]:\n \"\"\"Transform function to randomly crop images, bounding boxes, masks,\n semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n results (Union[dict, None]): Randomly cropped results, 'img_shape'\n key in result dict is updated according to crop size. None will\n be returned when there is no valid bbox after cropping.\n \"\"\"\n image_size = results['img'].shape[:2]\n crop_size = self._get_crop_size(image_size)\n results = self._crop_data(results, crop_size, self.allow_negative_crop)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(crop_size={self.crop_size}, '\n repr_str += f'crop_type={self.crop_type}, '\n repr_str += f'allow_negative_crop={self.allow_negative_crop}, '\n repr_str += f'recompute_bbox={self.recompute_bbox}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n return repr_str" }, { "identifier": "RandomFlip", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class RandomFlip(MMCV_RandomFlip):\n \"\"\"Flip the image & bbox & mask & segmentation map. Added or Updated keys:\n flip, flip_direction, img, gt_bboxes, and gt_seg_map. 
There are 3 flip\n modes:\n\n - ``prob`` is float, ``direction`` is string: the image will be\n ``direction``ly flipped with probability of ``prob`` .\n E.g., ``prob=0.5``, ``direction='horizontal'``,\n then image will be horizontally flipped with probability of 0.5.\n - ``prob`` is float, ``direction`` is list of string: the image will\n be ``direction[i]``ly flipped with probability of\n ``prob/len(direction)``.\n E.g., ``prob=0.5``, ``direction=['horizontal', 'vertical']``,\n then image will be horizontally flipped with probability of 0.25,\n vertically with probability of 0.25.\n - ``prob`` is list of float, ``direction`` is list of string:\n given ``len(prob) == len(direction)``, the image will\n be ``direction[i]``ly flipped with probability of ``prob[i]``.\n E.g., ``prob=[0.3, 0.5]``, ``direction=['horizontal',\n 'vertical']``, then image will be horizontally flipped with\n probability of 0.3, vertically with probability of 0.5.\n\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - gt_bboxes\n - gt_masks\n - gt_seg_map\n\n Added Keys:\n\n - flip\n - flip_direction\n - homography_matrix\n\n\n Args:\n prob (float | list[float], optional): The flipping probability.\n Defaults to None.\n direction(str | list[str]): The flipping direction. Options\n If input is a list, the length must equal ``prob``. Each\n element in ``prob`` indicates the flip probability of\n corresponding direction. Defaults to 'horizontal'.\n \"\"\"\n\n def _record_homography_matrix(self, results: dict) -> None:\n \"\"\"Record the homography matrix for the RandomFlip.\"\"\"\n cur_dir = results['flip_direction']\n h, w = results['img'].shape[:2]\n\n if cur_dir == 'horizontal':\n homography_matrix = np.array([[-1, 0, w], [0, 1, 0], [0, 0, 1]],\n dtype=np.float32)\n elif cur_dir == 'vertical':\n homography_matrix = np.array([[1, 0, 0], [0, -1, h], [0, 0, 1]],\n dtype=np.float32)\n elif cur_dir == 'diagonal':\n homography_matrix = np.array([[-1, 0, w], [0, -1, h], [0, 0, 1]],\n dtype=np.float32)\n else:\n homography_matrix = np.eye(3, dtype=np.float32)\n\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n @autocast_box_type()\n def _flip(self, results: dict) -> None:\n \"\"\"Flip images, bounding boxes, and semantic segmentation map.\"\"\"\n # flip image\n results['img'] = mmcv.imflip(\n results['img'], direction=results['flip_direction'])\n\n img_shape = results['img'].shape[:2]\n\n # flip bboxes\n if results.get('gt_bboxes', None) is not None:\n results['gt_bboxes'].flip_(img_shape, results['flip_direction'])\n\n # flip masks\n if results.get('gt_masks', None) is not None:\n results['gt_masks'] = results['gt_masks'].flip(\n results['flip_direction'])\n\n # flip segs\n if results.get('gt_seg_map', None) is not None:\n results['gt_seg_map'] = mmcv.imflip(\n results['gt_seg_map'], direction=results['flip_direction'])\n\n # record homography matrix for flip\n self._record_homography_matrix(results)" }, { "identifier": "Resize", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class Resize(MMCV_Resize):\n \"\"\"Resize images & bbox & seg.\n\n This transform resizes the input image according to ``scale`` or\n ``scale_factor``. 
Bboxes, masks, and seg map are then resized\n with the same scale factor.\n if ``scale`` and ``scale_factor`` are both set, it will use ``scale`` to\n resize.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes\n - gt_masks\n - gt_seg_map\n\n\n Added Keys:\n\n - scale\n - scale_factor\n - keep_ratio\n - homography_matrix\n\n Args:\n scale (int or tuple): Images scales for resizing. Defaults to None\n scale_factor (float or tuple[float]): Scale factors for resizing.\n Defaults to None.\n keep_ratio (bool): Whether to keep the aspect ratio when resizing the\n image. Defaults to False.\n clip_object_border (bool): Whether to clip the objects\n outside the border of the image. In some dataset like MOT17, the gt\n bboxes are allowed to cross the border of images. Therefore, we\n don't need to clip the gt bboxes in these cases. Defaults to True.\n backend (str): Image resize backend, choices are 'cv2' and 'pillow'.\n These two backends generates slightly different results. Defaults\n to 'cv2'.\n interpolation (str): Interpolation method, accepted values are\n \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n backend, \"nearest\", \"bilinear\" for 'pillow' backend. Defaults\n to 'bilinear'.\n \"\"\"\n\n def _resize_masks(self, results: dict) -> None:\n \"\"\"Resize masks with ``results['scale']``\"\"\"\n if results.get('gt_masks', None) is not None:\n if self.keep_ratio:\n results['gt_masks'] = results['gt_masks'].rescale(\n results['scale'])\n else:\n results['gt_masks'] = results['gt_masks'].resize(\n results['img_shape'])\n\n def _resize_bboxes(self, results: dict) -> None:\n \"\"\"Resize bounding boxes with ``results['scale_factor']``.\"\"\"\n if results.get('gt_bboxes', None) is not None:\n results['gt_bboxes'].rescale_(results['scale_factor'])\n if self.clip_object_border:\n results['gt_bboxes'].clip_(results['img_shape'])\n\n def _record_homography_matrix(self, results: dict) -> None:\n \"\"\"Record the homography matrix for the Resize.\"\"\"\n w_scale, h_scale = results['scale_factor']\n homography_matrix = np.array(\n [[w_scale, 0, 0], [0, h_scale, 0], [0, 0, 1]], dtype=np.float32)\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"Transform function to resize images, bounding boxes and semantic\n segmentation map.\n\n Args:\n results (dict): Result dict from loading pipeline.\n Returns:\n dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map',\n 'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys\n are updated in result dict.\n \"\"\"\n if self.scale:\n results['scale'] = self.scale\n else:\n img_shape = results['img'].shape[:2]\n results['scale'] = _scale_size(img_shape[::-1], self.scale_factor)\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n self._record_homography_matrix(results)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(scale={self.scale}, '\n repr_str += f'scale_factor={self.scale_factor}, '\n repr_str += f'keep_ratio={self.keep_ratio}, '\n repr_str += f'clip_object_border={self.clip_object_border}), '\n repr_str += 
f'backend={self.backend}), '\n repr_str += f'interpolation={self.interpolation})'\n return repr_str" }, { "identifier": "YOLOXHSVRandomAug", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class YOLOXHSVRandomAug(BaseTransform):\n \"\"\"Apply HSV augmentation to image sequentially. It is referenced from\n https://github.com/Megvii-\n BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21.\n\n Required Keys:\n\n - img\n\n Modified Keys:\n\n - img\n\n Args:\n hue_delta (int): delta of hue. Defaults to 5.\n saturation_delta (int): delta of saturation. Defaults to 30.\n value_delta (int): delat of value. Defaults to 30.\n \"\"\"\n\n def __init__(self,\n hue_delta: int = 5,\n saturation_delta: int = 30,\n value_delta: int = 30) -> None:\n self.hue_delta = hue_delta\n self.saturation_delta = saturation_delta\n self.value_delta = value_delta\n\n @cache_randomness\n def _get_hsv_gains(self):\n hsv_gains = np.random.uniform(-1, 1, 3) * [\n self.hue_delta, self.saturation_delta, self.value_delta\n ]\n # random selection of h, s, v\n hsv_gains *= np.random.randint(0, 2, 3)\n # prevent overflow\n hsv_gains = hsv_gains.astype(np.int16)\n return hsv_gains\n\n def transform(self, results: dict) -> dict:\n img = results['img']\n hsv_gains = self._get_hsv_gains()\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16)\n\n img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180\n img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255)\n img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255)\n cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img)\n\n results['img'] = img\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(hue_delta={self.hue_delta}, '\n repr_str += f'saturation_delta={self.saturation_delta}, '\n repr_str += f'value_delta={self.value_delta})'\n return repr_str" }, { "identifier": "PipelineSwitchHook", "path": "mmdet/engine/hooks/pipeline_switch_hook.py", "snippet": "class PipelineSwitchHook(Hook):\n \"\"\"Switch data pipeline at switch_epoch.\n\n Args:\n switch_epoch (int): switch pipeline at this epoch.\n switch_pipeline (list[dict]): the pipeline to switch to.\n \"\"\"\n\n def __init__(self, switch_epoch, switch_pipeline):\n self.switch_epoch = switch_epoch\n self.switch_pipeline = switch_pipeline\n self._restart_dataloader = False\n self._has_switched = False\n\n def before_train_epoch(self, runner):\n \"\"\"switch pipeline.\"\"\"\n epoch = runner.epoch\n train_loader = runner.train_dataloader\n if epoch >= self.switch_epoch and not self._has_switched:\n runner.logger.info('Switch pipeline now!')\n # The dataset pipeline cannot be updated when persistent_workers\n # is True, so we need to force the dataloader's multi-process\n # restart. 
This is a very hacky approach.\n train_loader.dataset.pipeline = Compose(self.switch_pipeline)\n if hasattr(train_loader, 'persistent_workers'\n ) and train_loader.persistent_workers is True:\n train_loader._DataLoader__initialized = False\n train_loader._iterator = None\n self._restart_dataloader = True\n self._has_switched = True\n else:\n # Once the restart is complete, we need to restore\n # the initialization flag.\n if self._restart_dataloader:\n train_loader._DataLoader__initialized = True" }, { "identifier": "ExpMomentumEMA", "path": "mmdet/models/layers/ema.py", "snippet": "class ExpMomentumEMA(ExponentialMovingAverage):\n \"\"\"Exponential moving average (EMA) with exponential momentum strategy,\n which is used in YOLOX.\n\n Args:\n model (nn.Module): The model to be averaged.\n momentum (float): The momentum used for updating ema parameter.\n Ema's parameter are updated with the formula:\n `averaged_param = (1-momentum) * averaged_param + momentum *\n source_param`. Defaults to 0.0002.\n gamma (int): Use a larger momentum early in training and gradually\n annealing to a smaller value to update the ema model smoothly. The\n momentum is calculated as\n `(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.\n Defaults to 2000.\n interval (int): Interval between two updates. Defaults to 1.\n device (torch.device, optional): If provided, the averaged model will\n be stored on the :attr:`device`. Defaults to None.\n update_buffers (bool): if True, it will compute running averages for\n both the parameters and the buffers of the model. Defaults to\n False.\n \"\"\"\n\n def __init__(self,\n model: nn.Module,\n momentum: float = 0.0002,\n gamma: int = 2000,\n interval=1,\n device: Optional[torch.device] = None,\n update_buffers: bool = False) -> None:\n super().__init__(\n model=model,\n momentum=momentum,\n interval=interval,\n device=device,\n update_buffers=update_buffers)\n assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'\n self.gamma = gamma\n\n def avg_func(self, averaged_param: Tensor, source_param: Tensor,\n steps: int) -> None:\n \"\"\"Compute the moving average of the parameters using the exponential\n momentum strategy.\n\n Args:\n averaged_param (Tensor): The averaged parameters.\n source_param (Tensor): The source parameters.\n steps (int): The number of times the parameters have been\n updated.\n \"\"\"\n momentum = (1 - self.momentum) * math.exp(\n -float(1 + steps) / self.gamma) + self.momentum\n averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum)" } ]
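The ExpMomentumEMA snippet above defines its annealed momentum as (1 - momentum) * exp(-(1 + steps) / gamma) + momentum. A minimal standalone sketch of that schedule in plain Python (the function name and the printed step values are only for illustration and are not part of the snippet):

import math

def exp_momentum(steps: int, momentum: float = 0.0002, gamma: int = 2000) -> float:
    # Annealed EMA momentum from the ExpMomentumEMA snippet: close to 1 early
    # in training, decaying towards the base momentum (2e-4) as steps grow.
    return (1 - momentum) * math.exp(-float(1 + steps) / gamma) + momentum

# With momentum near 1 the averaged parameters track the source model almost
# directly; later updates blend in only a tiny fraction of the source weights.
for steps in (0, 100, 1_000, 10_000):
    print(steps, round(exp_momentum(steps), 6))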
from mmengine.config import read_base from .rtmdet_ins_l_8xb32_300e_coco import * from mmcv.transforms.loading import LoadImageFromFile from mmcv.transforms.processing import RandomResize from mmengine.hooks.ema_hook import EMAHook from mmdet.datasets.transforms.formatting import PackDetInputs from mmdet.datasets.transforms.loading import (FilterAnnotations, LoadAnnotations) from mmdet.datasets.transforms.transforms import (CachedMixUp, CachedMosaic, Pad, RandomCrop, RandomFlip, Resize, YOLOXHSVRandomAug) from mmdet.engine.hooks.pipeline_switch_hook import PipelineSwitchHook from mmdet.models.layers.ema import ExpMomentumEMA
18163
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.33, widen_factor=0.5, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict(in_channels=128, feat_channels=128))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False), dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0),
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.33, widen_factor=0.5, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict(in_channels=128, feat_channels=128))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False), dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0),
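The train_pipeline above applies CachedMosaic right after the loading steps, and the context list also includes CachedMixUp; both keep a rolling cache of recent samples and only begin mixing once the cache is warm. A minimal standalone sketch of that cache-and-pop policy in plain Python (the class and variable names are invented for illustration; the real transforms cache full result dicts, not strings):

import random

class ResultCache:
    # Rolling cache policy mirrored from the CachedMosaic/CachedMixUp snippets:
    # append every incoming sample, evict one entry (random or FIFO) once full.
    def __init__(self, max_cached: int = 40, random_pop: bool = True, warmup: int = 4):
        self.buf = []
        self.max_cached = max_cached
        self.random_pop = random_pop
        self.warmup = warmup  # CachedMosaic skips mixing while len(cache) <= 4

    def put(self, item) -> None:
        self.buf.append(item)
        if len(self.buf) > self.max_cached:
            idx = random.randint(0, len(self.buf) - 1) if self.random_pop else 0
            self.buf.pop(idx)

    def ready(self) -> bool:
        return len(self.buf) > self.warmup

    def sample(self, k: int = 3):
        # CachedMosaic draws 3 extra cached samples to fill the other quadrants.
        return [self.buf[random.randint(0, len(self.buf) - 1)] for _ in range(k)]

cache = ResultCache(max_cached=8)
for i in range(20):
    cache.put(f"img_{i}")
print(cache.ready(), cache.sample())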
resize_type=Resize,
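The completion target above finishes the RandomResize entry by pointing resize_type at the Resize transform imported in the import block (its snippet appears in the context list). As a rough numeric illustration of what a ratio_range of (0.5, 2.0) around the (1280, 1280) base scale amounts to (plain Python; this mimics the idea of ratio-based scale jitter, not mmcv's exact sampling code):

import random

def jittered_scale(base=(1280, 1280), ratio_range=(0.5, 2.0)):
    # Scale the base size by a single random ratio, keeping the aspect ratio.
    ratio = random.uniform(*ratio_range)
    return int(base[0] * ratio), int(base[1] * ratio)

random.seed(0)
print([jittered_scale() for _ in range(3)])  # sizes fall between 640x640 and 2560x2560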
8
2023-12-11 15:23:03+00:00
24k
chinhsuanwu/ifusion
model/zero123.py
[ { "identifier": "inject_trainable_lora_extended", "path": "ldm/lora.py", "snippet": "def inject_trainable_lora_extended(\n model: nn.Module,\n target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,\n r: int = 4,\n loras=None, # path to lora .pt\n eval=True,\n):\n \"\"\"\n inject lora into model, and returns lora parameter groups.\n \"\"\"\n\n require_grad_params = []\n names = []\n\n if loras != None:\n loras = torch.load(loras, map_location=model.device)\n\n for _module, name, _child_module in _find_modules(\n model, target_replace_module, search_class=[nn.Linear, nn.Conv2d]\n ):\n if _child_module.__class__ == nn.Linear:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedLinear(\n _child_module.in_features,\n _child_module.out_features,\n _child_module.bias is not None,\n r=r,\n )\n _tmp.linear.weight = weight\n if bias is not None:\n _tmp.linear.bias = bias\n elif _child_module.__class__ == nn.Conv2d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv2d(\n _child_module.in_channels,\n _child_module.out_channels,\n _child_module.kernel_size,\n _child_module.stride,\n _child_module.padding,\n _child_module.dilation,\n _child_module.groups,\n _child_module.bias is not None,\n r=r,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias\n\n # switch the module\n _tmp.to(_child_module.weight.device).to(_child_module.weight.dtype)\n if bias is not None:\n _tmp.to(_child_module.bias.device).to(_child_module.bias.dtype)\n\n _module._modules[name] = _tmp\n\n require_grad_params.append(_module._modules[name].lora_up.parameters())\n require_grad_params.append(_module._modules[name].lora_down.parameters())\n\n if loras != None:\n _module._modules[name].lora_up.weight = nn.Parameter(loras.pop(0).to(model.dtype))\n _module._modules[name].lora_down.weight = nn.Parameter(loras.pop(0).to(model.dtype))\n\n _module._modules[name].lora_up.weight.requires_grad = True if not eval else False\n _module._modules[name].lora_down.weight.requires_grad = True if not eval else False\n names.append(name)\n\n return require_grad_params, names" }, { "identifier": "monkeypatch_remove_lora", "path": "ldm/lora.py", "snippet": "def monkeypatch_remove_lora(model):\n for _module, name, _child_module in _find_modules(\n model, search_class=[LoraInjectedLinear, LoraInjectedConv2d]\n ):\n if isinstance(_child_module, LoraInjectedLinear):\n _source = _child_module.linear\n weight, bias = _source.weight, _source.bias\n\n _tmp = nn.Linear(\n _source.in_features, _source.out_features, bias is not None\n )\n\n _tmp.weight = weight\n if bias is not None:\n _tmp.bias = bias\n\n else:\n _source = _child_module.conv\n weight, bias = _source.weight, _source.bias\n\n _tmp = nn.Conv2d(\n in_channels=_source.in_channels,\n out_channels=_source.out_channels,\n kernel_size=_source.kernel_size,\n stride=_source.stride,\n padding=_source.padding,\n dilation=_source.dilation,\n groups=_source.groups,\n bias=bias is not None,\n )\n\n _tmp.weight = weight\n if bias is not None:\n _tmp.bias = bias\n\n _module._modules[name] = _tmp" }, { "identifier": "save_lora_weight", "path": "ldm/lora.py", "snippet": "def save_lora_weight(\n model,\n path=\"./lora.pt\",\n target_replace_module=DEFAULT_TARGET_REPLACE,\n):\n weights = []\n for _up, _down in extract_lora_ups_down(\n model, target_replace_module=target_replace_module\n ):\n weights.append(_up.weight.to(\"cpu\").to(torch.float16))\n weights.append(_down.weight.to(\"cpu\").to(torch.float16))\n\n 
torch.save(weights, path)" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(\n self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image_cond\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n unet_trainable=True,\n *args,\n **kwargs,\n ):\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs[\"timesteps\"]\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = \"concat\" if concat_mode else \"crossattn\"\n if cond_stage_config == \"__is_unconditional__\":\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.unet_trainable = unet_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = scale_factor\n else:\n self.register_buffer(\"scale_factor\", torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n\n # construct linear projection layer for concatenating image CLIP embedding and RT\n self.cc_projection = nn.Linear(772, 768)\n nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768])\n nn.init.zeros_(list(self.cc_projection.parameters())[1])\n self.cc_projection.requires_grad_(True)\n\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n\n def make_cond_schedule(\n self,\n ):\n self.cond_ids = torch.full(\n size=(self.num_timesteps,),\n fill_value=self.num_timesteps - 1,\n dtype=torch.long,\n )\n ids = torch.round(\n torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)\n ).long()\n self.cond_ids[: self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if (\n self.scale_by_std\n and self.current_epoch == 0\n and self.global_step == 0\n and batch_idx == 0\n and not self.restarted_from_ckpt\n ):\n assert (\n self.scale_factor == 1.0\n ), \"rather not use custom rescaling and std-rescaling simultaneously\"\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer(\"scale_factor\", 1.0 / z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(\n self,\n given_betas=None,\n beta_schedule=\"linear\",\n timesteps=1000,\n linear_start=1e-4,\n linear_end=2e-2,\n cosine_s=8e-3,\n ):\n super().register_schedule(\n given_betas, beta_schedule, timesteps, linear_start, linear_end, 
cosine_s\n )\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != \"__is_first_stage__\"\n assert config != \"__is_unconditional__\"\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(\n self, samples, desc=\"\", force_no_decoder_quantization=False\n ):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(\n self.decode_first_stage(\n zd.to(self.device), force_not_quantize=force_no_decoder_quantization\n )\n )\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, \"n b c h w -> b n c h w\")\n denoise_grid = rearrange(denoise_grid, \"b n c h w -> (b n) c h w\")\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(\n f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\"\n )\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, \"encode\") and callable(\n self.cond_stage_model.encode\n ):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(\n torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1\n )[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(\n weighting,\n 
self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"],\n )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(\n L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"],\n )\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(\n self, x, kernel_size, stride, uf=1, df=1\n ): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(\n kernel_size=kernel_size, dilation=1, padding=0, stride=stride\n )\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(\n kernel_size[0], kernel_size[1], Ly, Lx, x.device\n ).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(\n kernel_size=kernel_size, dilation=1, padding=0, stride=stride\n )\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(\n kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1,\n padding=0,\n stride=(stride[0] * uf, stride[1] * uf),\n )\n fold = torch.nn.Fold(\n output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2\n )\n\n weighting = self.get_weighting(\n kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device\n ).to(x.dtype)\n normalization = fold(weighting).view(\n 1, 1, h * uf, w * uf\n ) # normalizes the overlap\n weighting = weighting.view(\n (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)\n )\n\n elif df > 1 and uf == 1:\n fold_params = dict(\n kernel_size=kernel_size, dilation=1, padding=0, stride=stride\n )\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(\n kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1,\n padding=0,\n stride=(stride[0] // df, stride[1] // df),\n )\n fold = torch.nn.Fold(\n output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2\n )\n\n weighting = self.get_weighting(\n kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device\n ).to(x.dtype)\n normalization = fold(weighting).view(\n 1, 1, h // df, w // df\n ) # normalizes the overlap\n weighting = weighting.view(\n (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)\n )\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(\n self,\n batch,\n k,\n return_first_stage_outputs=False,\n force_c_encode=False,\n cond_key=None,\n return_original_cond=False,\n bs=None,\n uncond=0.05,\n ):\n x = super().get_input(batch, k)\n T = batch[\"T\"].to(memory_format=torch.contiguous_format).float()\n\n if bs is not None:\n x = x[:bs]\n T = T[:bs].to(self.device)\n\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n cond_key = cond_key or self.cond_stage_key\n xc = super().get_input(batch, cond_key).to(self.device)\n if bs is not 
None:\n xc = xc[:bs]\n cond = {}\n\n # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%.\n random = torch.rand(x.size(0), device=x.device)\n prompt_mask = rearrange(random < 2 * uncond, \"n -> n 1 1\")\n input_mask = 1 - rearrange(\n (random >= uncond).float() * (random < 3 * uncond).float(), \"n -> n 1 1 1\"\n )\n null_prompt = self.get_learned_conditioning([\"\"])\n\n # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768]\n # print('=========== xc shape ===========', xc.shape)\n with torch.enable_grad():\n clip_emb = self.get_learned_conditioning(xc).detach()\n null_prompt = self.get_learned_conditioning([\"\"]).detach()\n cond[\"c_crossattn\"] = [\n self.cc_projection(\n torch.cat(\n [\n torch.where(prompt_mask, null_prompt, clip_emb),\n T[:, None, :],\n ],\n dim=-1,\n )\n )\n ]\n cond[\"c_concat\"] = [\n input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach()\n ]\n out = [z, cond]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_original_cond:\n out.append(xc)\n return out\n\n # @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, \"b h w c -> b c h w\").contiguous()\n\n z = 1.0 / self.scale_factor * z\n\n if hasattr(self, \"split_input_params\"):\n if self.split_input_params[\"patch_distributed_vq\"]:\n ks = self.split_input_params[\"ks\"] # eg. (128, 128)\n stride = self.split_input_params[\"stride\"] # eg. (64, 64)\n uf = self.split_input_params[\"vqf\"]\n bs, nc, h, w = z.shape\n if ks[0] > h or ks[1] > w:\n ks = (min(ks[0], h), min(ks[1], w))\n print(\"reducing Kernel\")\n\n if stride[0] > h or stride[1] > w:\n stride = (min(stride[0], h), min(stride[1], w))\n print(\"reducing stride\")\n\n fold, unfold, normalization, weighting = self.get_fold_unfold(\n z, ks, stride, uf=uf\n )\n\n z = unfold(z) # (bn, nc * prod(**ks), L)\n # 1. Reshape to img shape\n z = z.view(\n (z.shape[0], -1, ks[0], ks[1], z.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n\n # 2. apply model loop over last dim\n if isinstance(self.first_stage_model, VQModelInterface):\n output_list = [\n self.first_stage_model.decode(\n z[:, :, :, :, i],\n force_not_quantize=predict_cids or force_not_quantize,\n )\n for i in range(z.shape[-1])\n ]\n else:\n output_list = [\n self.first_stage_model.decode(z[:, :, :, :, i])\n for i in range(z.shape[-1])\n ]\n\n o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)\n o = o * weighting\n # Reverse 1. 
reshape to img shape\n o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)\n # stitch crops together\n decoded = fold(o)\n decoded = decoded / normalization # norm is shape (1, 1, h, w)\n return decoded\n else:\n if isinstance(self.first_stage_model, VQModelInterface):\n return self.first_stage_model.decode(\n z, force_not_quantize=predict_cids or force_not_quantize\n )\n else:\n return self.first_stage_model.decode(z)\n\n else:\n if isinstance(self.first_stage_model, VQModelInterface):\n return self.first_stage_model.decode(\n z, force_not_quantize=predict_cids or force_not_quantize\n )\n else:\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n if hasattr(self, \"split_input_params\"):\n if self.split_input_params[\"patch_distributed_vq\"]:\n ks = self.split_input_params[\"ks\"] # eg. (128, 128)\n stride = self.split_input_params[\"stride\"] # eg. (64, 64)\n df = self.split_input_params[\"vqf\"]\n self.split_input_params[\"original_image_size\"] = x.shape[-2:]\n bs, nc, h, w = x.shape\n if ks[0] > h or ks[1] > w:\n ks = (min(ks[0], h), min(ks[1], w))\n print(\"reducing Kernel\")\n\n if stride[0] > h or stride[1] > w:\n stride = (min(stride[0], h), min(stride[1], w))\n print(\"reducing stride\")\n\n fold, unfold, normalization, weighting = self.get_fold_unfold(\n x, ks, stride, df=df\n )\n z = unfold(x) # (bn, nc * prod(**ks), L)\n # Reshape to img shape\n z = z.view(\n (z.shape[0], -1, ks[0], ks[1], z.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n\n output_list = [\n self.first_stage_model.encode(z[:, :, :, :, i])\n for i in range(z.shape[-1])\n ]\n\n o = torch.stack(output_list, axis=-1)\n o = o * weighting\n\n # Reverse reshape to img shape\n o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)\n # stitch crops together\n decoded = fold(o)\n decoded = decoded / normalization\n return decoded\n\n else:\n return self.first_stage_model.encode(x)\n else:\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, step_ratio=None, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c, step_ratio=step_ratio)\n return loss\n\n def forward(self, x, c, step_ratio=None, *args, **kwargs):\n if step_ratio is not None:\n t = np.round((1 - step_ratio) * self.num_timesteps).clip(0, self.num_timesteps - 1)\n t = torch.full((x.shape[0],), t, dtype=torch.long, device=self.device)\n else:\n t = torch.randint(\n 0, self.num_timesteps, (x.shape[0],), device=self.device\n ).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n # if self.cond_stage_trainable:\n # c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset\n def rescale_bbox(bbox):\n x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])\n y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])\n w = min(bbox[2] / crop_coordinates[2], 1 - x0)\n h = min(bbox[3] / crop_coordinates[3], 1 - y0)\n return x0, y0, w, h\n\n return [rescale_bbox(b) for b in bboxes]\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is exptected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = (\n \"c_concat\" if 
self.model.conditioning_key == \"concat\" else \"c_crossattn\"\n )\n cond = {key: cond}\n\n if hasattr(self, \"split_input_params\"):\n assert len(cond) == 1 # todo can only deal with one conditioning atm\n assert not return_ids\n ks = self.split_input_params[\"ks\"] # eg. (128, 128)\n stride = self.split_input_params[\"stride\"] # eg. (64, 64)\n\n h, w = x_noisy.shape[-2:]\n\n fold, unfold, normalization, weighting = self.get_fold_unfold(\n x_noisy, ks, stride\n )\n\n z = unfold(x_noisy) # (bn, nc * prod(**ks), L)\n # Reshape to img shape\n z = z.view(\n (z.shape[0], -1, ks[0], ks[1], z.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]\n\n if (\n self.cond_stage_key in [\"image\", \"LR_image\", \"segmentation\", \"bbox_img\"]\n and self.model.conditioning_key\n ): # todo check for completeness\n c_key = next(iter(cond.keys())) # get key\n c = next(iter(cond.values())) # get value\n assert len(c) == 1 # todo extend to list with more than one elem\n c = c[0] # get element\n\n c = unfold(c)\n c = c.view(\n (c.shape[0], -1, ks[0], ks[1], c.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n\n cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]\n\n elif self.cond_stage_key == \"coordinates_bbox\":\n assert (\n \"original_image_size\" in self.split_input_params\n ), \"BoudingBoxRescaling is missing original_image_size\"\n\n # assuming padding of unfold is always 0 and its dilation is always 1\n n_patches_per_row = int((w - ks[0]) / stride[0] + 1)\n full_img_h, full_img_w = self.split_input_params[\"original_image_size\"]\n # as we are operating on latents, we need the factor from the original image size to the\n # spatial latent size to properly rescale the crops for regenerating the bbox annotations\n num_downs = self.first_stage_model.encoder.num_resolutions - 1\n rescale_latent = 2 ** (num_downs)\n\n # get top left postions of patches as conforming for the bbbox tokenizer, therefore we\n # need to rescale the tl patch coordinates to be in between (0,1)\n tl_patch_coordinates = [\n (\n rescale_latent\n * stride[0]\n * (patch_nr % n_patches_per_row)\n / full_img_w,\n rescale_latent\n * stride[1]\n * (patch_nr // n_patches_per_row)\n / full_img_h,\n )\n for patch_nr in range(z.shape[-1])\n ]\n\n # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)\n patch_limits = [\n (\n x_tl,\n y_tl,\n rescale_latent * ks[0] / full_img_w,\n rescale_latent * ks[1] / full_img_h,\n )\n for x_tl, y_tl in tl_patch_coordinates\n ]\n # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]\n\n # tokenize crop coordinates for the bounding boxes of the respective patches\n patch_limits_tknzd = [\n torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(\n self.device\n )\n for bbox in patch_limits\n ] # list of length l with tensors of shape (1, 2)\n # cut tknzd crop position from conditioning\n assert isinstance(cond, dict), \"cond must be dict to be fed into model\"\n cut_cond = cond[\"c_crossattn\"][0][..., :-2].to(self.device)\n\n adapted_cond = torch.stack(\n [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]\n )\n adapted_cond = rearrange(adapted_cond, \"l b n -> (l b) n\")\n adapted_cond = self.get_learned_conditioning(adapted_cond)\n adapted_cond = rearrange(\n adapted_cond, \"(l b) n d -> l b n d\", l=z.shape[-1]\n )\n\n cond_list = [{\"c_crossattn\": [e]} for e in adapted_cond]\n\n else:\n cond_list = [\n cond for i in 
range(z.shape[-1])\n ] # Todo make this more efficient\n\n # apply model by loop over crops\n output_list = [\n self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])\n ]\n assert not isinstance(\n output_list[0], tuple\n ) # todo cant deal with multiple model outputs check this never happens\n\n o = torch.stack(output_list, axis=-1)\n o = o * weighting\n # Reverse reshape to img shape\n o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)\n # stitch crops together\n x_recon = fold(o) / normalization\n\n else:\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (\n extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - pred_xstart\n ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(\n mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0\n )\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n\n loss_dict = {}\n prefix = \"train\" if self.training else \"val\"\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f\"{prefix}/loss_simple\": loss_simple.mean()})\n\n if self.logvar.device != self.device:\n self.logvar = self.logvar.to(self.device)\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f\"{prefix}/loss_gamma\": loss.mean()})\n loss_dict.update({\"logvar\": self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f\"{prefix}/loss_vlb\": loss_vlb})\n loss += self.original_elbo_weight * loss_vlb\n loss_dict.update({f\"{prefix}/loss\": loss})\n\n return loss, loss_dict\n\n def p_mean_variance(\n self,\n x,\n c,\n t,\n clip_denoised: bool,\n return_codebook_ids=False,\n quantize_denoised=False,\n return_x0=False,\n score_corrector=None,\n corrector_kwargs=None,\n ):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(\n self, model_out, x, t, c, **corrector_kwargs\n )\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif 
self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1.0, 1.0)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(\n x_start=x_recon, x_t=x, t=t\n )\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(\n self,\n x,\n c,\n t,\n clip_denoised=False,\n repeat_noise=False,\n return_codebook_ids=False,\n quantize_denoised=False,\n return_x0=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n ):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(\n x=x,\n c=c,\n t=t,\n clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n )\n if return_codebook_ids:\n raise DeprecationWarning(\"Support dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (\n 0.5 * model_log_variance\n ).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return (\n model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise,\n x0,\n )\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(\n self,\n cond,\n shape,\n verbose=True,\n callback=None,\n quantize_denoised=False,\n img_callback=None,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n batch_size=None,\n x_T=None,\n start_T=None,\n log_every_t=None,\n ):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {\n key: cond[key][:batch_size]\n if not isinstance(cond[key], list)\n else list(map(lambda x: x[:batch_size], cond[key]))\n for key in cond\n }\n else:\n cond = (\n [c[:batch_size] for c in cond]\n if isinstance(cond, list)\n else cond[:batch_size]\n )\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = (\n tqdm(\n reversed(range(0, timesteps)),\n desc=\"Progressive Generation\",\n total=timesteps,\n )\n if verbose\n else reversed(range(0, timesteps))\n )\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != \"hybrid\"\n tc = 
self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(\n img,\n cond,\n ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised,\n return_x0=True,\n temperature=temperature[i],\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n )\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1.0 - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback:\n callback(i)\n if img_callback:\n img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(\n self,\n cond,\n shape,\n return_intermediates=False,\n x_T=None,\n verbose=True,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n start_T=None,\n log_every_t=None,\n ):\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = (\n tqdm(reversed(range(0, timesteps)), desc=\"Sampling t\", total=timesteps)\n if verbose\n else reversed(range(0, timesteps))\n )\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != \"hybrid\"\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(\n img,\n cond,\n ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised,\n )\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1.0 - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback:\n callback(i)\n if img_callback:\n img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(\n self,\n cond,\n batch_size=16,\n return_intermediates=False,\n x_T=None,\n verbose=True,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n shape=None,\n **kwargs,\n ):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {\n key: cond[key][:batch_size]\n if not isinstance(cond[key], list)\n else list(map(lambda x: x[:batch_size], cond[key]))\n for key in cond\n }\n else:\n cond = (\n [c[:batch_size] for c in cond]\n if isinstance(cond, list)\n else cond[:batch_size]\n )\n return self.p_sample_loop(\n cond,\n shape,\n return_intermediates=return_intermediates,\n x_T=x_T,\n verbose=verbose,\n timesteps=timesteps,\n quantize_denoised=quantize_denoised,\n mask=mask,\n x0=x0,\n )\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(\n ddim_steps, batch_size, shape, cond, verbose=False, **kwargs\n )\n\n else:\n samples, intermediates = self.sample(\n cond=cond, batch_size=batch_size, 
return_intermediates=True, **kwargs\n )\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(\n self, batch_size, null_label=None, image_size=512\n ):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n # todo: get null label from cond_stage_model\n raise NotImplementedError()\n c = repeat(c, \"1 ... -> b ...\", b=batch_size).to(self.device)\n cond = {}\n cond[\"c_crossattn\"] = [c]\n cond[\"c_concat\"] = [\n torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to(\n self.device\n )\n ]\n return cond\n\n @torch.no_grad()\n def log_images(\n self,\n batch,\n N=8,\n n_row=4,\n sample=True,\n ddim_steps=200,\n ddim_eta=1.0,\n return_keys=None,\n quantize_denoised=True,\n inpaint=True,\n plot_denoise_rows=False,\n plot_progressive_rows=True,\n plot_diffusion_rows=True,\n unconditional_guidance_scale=1.0,\n unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs,\n ):\n ema_scope = self.ema_scope if use_ema_scope else nullcontext\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(\n batch,\n self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N,\n )\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img(\n (x.shape[2], x.shape[3]),\n batch[self.cond_stage_key],\n size=x.shape[2] // 25,\n )\n log[\"conditioning\"] = xc\n elif self.cond_stage_key == \"class_label\":\n xc = log_txt_as_img(\n (x.shape[2], x.shape[3]),\n batch[\"human_label\"],\n size=x.shape[2] // 25,\n )\n log[\"conditioning\"] = xc\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), \"1 -> b\", b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, \"n b c h w -> b n c h w\")\n diffusion_grid = rearrange(diffusion_grid, \"b n c h w -> (b n) c h w\")\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n )\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if (\n quantize_denoised\n and not 
isinstance(self.first_stage_model, AutoencoderKL)\n and not isinstance(self.first_stage_model, IdentityFirstStage)\n ):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n quantize_denoised=True,\n )\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(\n N, unconditional_guidance_label, image_size=x.shape[-1]\n )\n # uc = torch.zeros_like(c)\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[\n f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"\n ] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = 0.0\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n eta=ddim_eta,\n ddim_steps=ddim_steps,\n x0=z[:N],\n mask=mask,\n )\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1.0 - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n eta=ddim_eta,\n ddim_steps=ddim_steps,\n x0=z[:N],\n mask=mask,\n )\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(\n c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N,\n )\n prog_row = self._get_denoise_row_from_list(\n progressives, desc=\"Progressive Generation\"\n )\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = []\n if self.unet_trainable == \"attn\":\n print(\"Training only unet attention layers\")\n for n, m in self.model.named_modules():\n if isinstance(m, CrossAttention) and n.endswith(\"attn2\"):\n params.extend(m.parameters())\n if self.unet_trainable == \"conv_in\":\n print(\"Training only unet input conv layers\")\n params = list(self.model.diffusion_model.input_blocks[0][0].parameters())\n elif self.unet_trainable is True or self.unet_trainable == \"all\":\n print(\"Training the full unet\")\n params = list(self.model.parameters())\n else:\n raise ValueError(\n f\"Unrecognised setting for unet_trainable: {self.unet_trainable}\"\n )\n\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print(\"Diffusion model 
optimizing logvar\")\n params.append(self.logvar)\n\n if self.cc_projection is not None:\n params = params + list(self.cc_projection.parameters())\n print(\"========== optimizing for cc projection weight ==========\")\n\n opt = torch.optim.AdamW(\n [\n {\"params\": self.model.parameters(), \"lr\": lr},\n {\"params\": self.cc_projection.parameters(), \"lr\": 10.0 * lr},\n ],\n lr=lr,\n )\n if self.use_scheduler:\n assert \"target\" in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n \"scheduler\": LambdaLR(opt, lr_lambda=scheduler.schedule),\n \"interval\": \"step\",\n \"frequency\": 1,\n }\n ]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "load_model_from_config", "path": "ldm/util.py", "snippet": "def load_model_from_config(config, ckpt, device, vram_O=False, verbose=False):\n print(f\"[INFO] Loading model from {ckpt}\")\n pl_sd = torch.load(ckpt, map_location=\"cpu\")\n\n if \"global_step\" in pl_sd and verbose:\n print(f'[INFO] Global Step: {pl_sd[\"global_step\"]}')\n\n sd = pl_sd[\"state_dict\"]\n\n model = instantiate_from_config(config.model)\n m, u = model.load_state_dict(sd, strict=False)\n\n if len(m) > 0 and verbose:\n print(\"[INFO] Missing keys: \\n\", m)\n if len(u) > 0 and verbose:\n print(\"[INFO] Unexpected keys: \\n\", u)\n\n # manually load ema and delete it to save GPU memory\n if model.use_ema:\n if verbose:\n print(\"[INFO] Loading EMA\")\n model.model_ema.copy_to(model.model)\n del model.model_ema\n\n if vram_O:\n # we don't need decoder\n del model.first_stage_model.decoder\n\n torch.cuda.empty_cache()\n model.eval().to(device)\n\n return model" }, { "identifier": "make_T", "path": "util/pose.py", "snippet": "def make_T(theta, azimuth, distance, in_deg=False):\n if in_deg:\n theta, azimuth = theta.deg2rad(), azimuth.deg2rad()\n return torch.stack(\n (\n theta,\n torch.sin(azimuth),\n torch.cos(azimuth),\n distance,\n )\n )" }, { "identifier": "default", "path": "util/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" } ]
import itertools import torch import torch.nn as nn from dataclasses import dataclass from diffusers import DDIMScheduler from einops import rearrange from omegaconf import OmegaConf from ldm.lora import ( inject_trainable_lora_extended, monkeypatch_remove_lora, save_lora_weight, ) from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import load_model_from_config from util.pose import make_T from util.typing import * from util.util import default
16,827
return self def save_lora( self, ckpt_fp: str, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], ): save_lora_weight( self.model.model, ckpt_fp, target_replace_module=set(target_replace_module), ) print(f"[INFO] Saved LoRA to {ckpt_fp}") def remove_lora(self): print("[INFO] Removing LoRA") monkeypatch_remove_lora(self.model.model) self.require_grad_params = [] return self @torch.cuda.amp.autocast(enabled=False) def forward(self, batch, step_ratio=None): batch["image_cond"] = rearrange(batch["image_cond"], "b c h w -> b h w c") batch["image_target"] = rearrange(batch["image_target"], "b c h w -> b h w c") loss, _ = self.model.shared_step(batch, step_ratio=step_ratio) return loss def generate_from_tensor( self, image: Float[Tensor, "B 3 256 256"], theta: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], distance: Float[Tensor, "B"], scale=3, ddim_steps=50, ddim_eta=1, in_deg: bool = False, ): if len(image) != len(theta): image = image.repeat(len(theta), 1, 1, 1) c_crossattn, c_concat = self.get_image_embeds(image) c_crossattn = self.clip_camera_projection( theta, azimuth, distance, c_crossattn, in_deg ) out = self.gen_from_cond( cond={"c_crossattn": c_crossattn, "c_concat": c_concat}, scale=scale, ddim_steps=ddim_steps, ddim_eta=ddim_eta, ) return out def generate_from_tensor_multi_cond( self, image: Float[Tensor, "B N 3 256 256"], theta: Float[Tensor, "B N"], azimuth: Float[Tensor, "B N"], distance: Float[Tensor, "B N"], scale=3, ddim_steps=50, ddim_eta=1, in_deg: bool = False, ): c_crossattn, c_concat = zip(*[self.get_image_embeds(x) for x in image]) c_crossattn, c_concat = torch.stack(c_crossattn), torch.stack(c_concat) c_crossattn = torch.stack( [ self.clip_camera_projection(t, a, d, c, in_deg) for t, a, d, c in zip(theta, azimuth, distance, c_crossattn) ] ) out = self.gen_from_cond( cond={"c_crossattn": c_crossattn, "c_concat": c_concat}, scale=scale, ddim_steps=ddim_steps, ddim_eta=ddim_eta, use_multi_view_condition=True, ) return out def generate( self, image: Float[Tensor, "B 3 256 256"], theta: float, azimuth: float, distance: float, in_deg: bool = True, **kwargs, ): theta = torch.tensor([theta], device=self.device) azimuth = torch.tensor([azimuth], device=self.device) distance = torch.tensor([distance], device=self.device) out = self.generate_from_tensor(image, theta, azimuth, distance, in_deg=in_deg, **kwargs) return out @torch.no_grad() def gen_from_cond( self, cond, scale=3, ddim_steps=50, ddim_eta=1, use_multi_view_condition=False ): B = len(cond["c_crossattn"]) if use_multi_view_condition: N = len(cond["c_crossattn"][0]) latent = torch.randn((B, 4, 32, 32), device=self.device) self.scheduler.set_timesteps(ddim_steps) cond_ = None # temporary condition for t in self.scheduler.timesteps: x_in = torch.cat([latent] * 2) t_in = torch.cat([t.reshape(1).repeat(B)] * 2).to(self.device) if use_multi_view_condition: # multi-view stochastic condition index = torch.randint(0, N, (B,)) cond_ = { "c_crossattn": cond["c_crossattn"][torch.arange(B), index], "c_concat": cond["c_concat"][torch.arange(B), index], } cond_ = self.make_cond(cond_) else:
class Zero123(nn.Module): @dataclass class Config: pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt" pretrained_config: str = "ldm/ckpt/sd-objaverse-finetune-c_concat-256.yaml" vram_O: bool = False min_step_percent: float = 0.02 max_step_percent: float = 0.98 config: Config def __init__(self, **kwargs) -> None: super().__init__() self.config = OmegaConf.structured(self.Config(**kwargs)) self.device = "cuda" self.require_grad_params = [] self.configure() def configure(self) -> None: print("[INFO] Loading Zero123...") self.pretrained_config = OmegaConf.load(self.config.pretrained_config) self.weights_dtype = torch.float32 self.model: LatentDiffusion = load_model_from_config( self.pretrained_config, self.config.pretrained_model_name_or_path, device=self.device, vram_O=self.config.vram_O, ) for p in self.model.parameters(): p.requires_grad_(False) self.num_train_timesteps = self.pretrained_config.model.params.timesteps self.scheduler = DDIMScheduler( self.num_train_timesteps, self.pretrained_config.model.params.linear_start, self.pretrained_config.model.params.linear_end, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1, ) self.num_train_timesteps = self.scheduler.config.num_train_timesteps self.set_min_max_steps( min_step_percent=self.config.min_step_percent, max_step_percent=self.config.max_step_percent, ) print("[INFO] Loaded Zero123") @torch.cuda.amp.autocast(enabled=False) def set_min_max_steps( self, min_step_percent: float = 0.02, max_step_percent: float = 0.98 ): self.min_step = int(self.num_train_timesteps * min_step_percent) self.max_step = int(self.num_train_timesteps * max_step_percent) @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def get_image_embeds( self, image: Float[Tensor, "B 3 256 256"] ) -> Tuple[Float[Tensor, "B 1 768"], Float[Tensor, "B 4 32 32"]]: c_crossattn = self.model.get_learned_conditioning(image.to(self.weights_dtype)) c_concat = self.model.encode_first_stage(image.to(self.weights_dtype)).mode() return c_crossattn, c_concat @torch.cuda.amp.autocast(enabled=False) def encode_image( self, image: Float[Tensor, "B 3 256 256"] ) -> Float[Tensor, "B 4 32 32"]: input_dtype = image.dtype latent = self.model.get_first_stage_encoding( self.model.encode_first_stage(image.to(self.weights_dtype)) ) return latent.to(input_dtype) # [B, 4, 32, 32] Latent space image @torch.cuda.amp.autocast(enabled=False) def decode_latent( self, latent: Float[Tensor, "B 4 H W"], ) -> Float[Tensor, "B 3 512 512"]: input_dtype = latent.dtype image = self.model.decode_first_stage(latent) image = (image * 0.5 + 0.5).clamp(0, 1) return image.to(input_dtype) @staticmethod @torch.no_grad() def make_cond(cond): """Add zeros to the beginning of cond""" return {k: [torch.cat([torch.zeros_like(v), v])] for k, v in cond.items()} @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def clip_camera_projection( self, theta: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], distance: Float[Tensor, "B"], c_crossattn: Float[Tensor, "B 1 768"], in_deg: bool = False, ): T = make_T(theta, azimuth, distance, in_deg=in_deg).T[:, None, :] clip_emb = self.model.cc_projection(torch.cat([c_crossattn, T], dim=-1)) return clip_emb def inject_lora( self, ckpt_fp: str = None, rank: int = 12, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], eval: bool = False, ): print( f"[INFO] Injecting LoRA from " + (str(ckpt_fp) if ckpt_fp is not None else "scratch"), ) lora_params, _ = inject_trainable_lora_extended( self.model.model, 
target_replace_module=set(target_replace_module), r=rank, loras=ckpt_fp, eval=eval, ) if not eval: self.require_grad_params += itertools.chain(*lora_params) return self def save_lora( self, ckpt_fp: str, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], ): save_lora_weight( self.model.model, ckpt_fp, target_replace_module=set(target_replace_module), ) print(f"[INFO] Saved LoRA to {ckpt_fp}") def remove_lora(self): print("[INFO] Removing LoRA") monkeypatch_remove_lora(self.model.model) self.require_grad_params = [] return self @torch.cuda.amp.autocast(enabled=False) def forward(self, batch, step_ratio=None): batch["image_cond"] = rearrange(batch["image_cond"], "b c h w -> b h w c") batch["image_target"] = rearrange(batch["image_target"], "b c h w -> b h w c") loss, _ = self.model.shared_step(batch, step_ratio=step_ratio) return loss def generate_from_tensor( self, image: Float[Tensor, "B 3 256 256"], theta: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], distance: Float[Tensor, "B"], scale=3, ddim_steps=50, ddim_eta=1, in_deg: bool = False, ): if len(image) != len(theta): image = image.repeat(len(theta), 1, 1, 1) c_crossattn, c_concat = self.get_image_embeds(image) c_crossattn = self.clip_camera_projection( theta, azimuth, distance, c_crossattn, in_deg ) out = self.gen_from_cond( cond={"c_crossattn": c_crossattn, "c_concat": c_concat}, scale=scale, ddim_steps=ddim_steps, ddim_eta=ddim_eta, ) return out def generate_from_tensor_multi_cond( self, image: Float[Tensor, "B N 3 256 256"], theta: Float[Tensor, "B N"], azimuth: Float[Tensor, "B N"], distance: Float[Tensor, "B N"], scale=3, ddim_steps=50, ddim_eta=1, in_deg: bool = False, ): c_crossattn, c_concat = zip(*[self.get_image_embeds(x) for x in image]) c_crossattn, c_concat = torch.stack(c_crossattn), torch.stack(c_concat) c_crossattn = torch.stack( [ self.clip_camera_projection(t, a, d, c, in_deg) for t, a, d, c in zip(theta, azimuth, distance, c_crossattn) ] ) out = self.gen_from_cond( cond={"c_crossattn": c_crossattn, "c_concat": c_concat}, scale=scale, ddim_steps=ddim_steps, ddim_eta=ddim_eta, use_multi_view_condition=True, ) return out def generate( self, image: Float[Tensor, "B 3 256 256"], theta: float, azimuth: float, distance: float, in_deg: bool = True, **kwargs, ): theta = torch.tensor([theta], device=self.device) azimuth = torch.tensor([azimuth], device=self.device) distance = torch.tensor([distance], device=self.device) out = self.generate_from_tensor(image, theta, azimuth, distance, in_deg=in_deg, **kwargs) return out @torch.no_grad() def gen_from_cond( self, cond, scale=3, ddim_steps=50, ddim_eta=1, use_multi_view_condition=False ): B = len(cond["c_crossattn"]) if use_multi_view_condition: N = len(cond["c_crossattn"][0]) latent = torch.randn((B, 4, 32, 32), device=self.device) self.scheduler.set_timesteps(ddim_steps) cond_ = None # temporary condition for t in self.scheduler.timesteps: x_in = torch.cat([latent] * 2) t_in = torch.cat([t.reshape(1).repeat(B)] * 2).to(self.device) if use_multi_view_condition: # multi-view stochastic condition index = torch.randint(0, N, (B,)) cond_ = { "c_crossattn": cond["c_crossattn"][torch.arange(B), index], "c_concat": cond["c_concat"][torch.arange(B), index], } cond_ = self.make_cond(cond_) else:
cond_ = default(cond_, self.make_cond(cond))
6
2023-12-17 12:45:38+00:00
24k
penghao-wu/vstar
VisualSearch/utils/dataset.py
[ { "identifier": "conversation", "path": "VisualSearch/model/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "VisualSearch/model/llava/constants.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "IGNORE_INDEX", "path": "VisualSearch/model/llava/constants.py", "snippet": "IGNORE_INDEX = -100" }, { "identifier": "IMAGE_TOKEN_INDEX", "path": "VisualSearch/model/llava/constants.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "tokenizer_image_token", "path": "VisualSearch/model/llava/mm_utils.py", "snippet": "def tokenizer_image_token(\n prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None\n):\n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split(\"<image>\")]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if (\n len(prompt_chunks) > 0\n and len(prompt_chunks[0]) > 0\n and prompt_chunks[0][0] == tokenizer.bos_token_id\n ):\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == \"pt\":\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f\"Unsupported tensor type: {return_tensors}\")\n return input_ids" }, { "identifier": "get_mask_from_json", "path": "VisualSearch/utils/data_processing.py", "snippet": "def get_mask_from_json(json_path, img):\n try:\n with open(json_path, \"r\") as r:\n anno = json.loads(r.read())\n except:\n with open(json_path, \"r\", encoding=\"cp1252\") as r:\n anno = json.loads(r.read())\n\n inform = anno[\"shapes\"]\n comments = anno[\"text\"]\n is_sentence = anno[\"is_sentence\"]\n\n height, width = img.shape[:2]\n\n ### sort polies by area\n area_list = []\n valid_poly_list = []\n for i in inform:\n label_id = i[\"label\"]\n points = i[\"points\"]\n if \"flag\" == label_id.lower(): ## meaningless deprecated annotations\n continue\n\n tmp_mask = np.zeros((height, width), dtype=np.uint8)\n cv2.polylines(tmp_mask, np.array([points], dtype=np.int32), True, 1, 1)\n cv2.fillPoly(tmp_mask, np.array([points], dtype=np.int32), 1)\n tmp_area = tmp_mask.sum()\n\n area_list.append(tmp_area)\n valid_poly_list.append(i)\n\n ### ground-truth mask\n sort_index = np.argsort(area_list)[::-1].astype(np.int32)\n sort_index = list(sort_index)\n sort_inform = []\n for s_idx in sort_index:\n sort_inform.append(valid_poly_list[s_idx])\n\n mask = np.zeros((height, width), dtype=np.uint8)\n for i in sort_inform:\n label_id = i[\"label\"]\n points = i[\"points\"]\n\n if \"ignore\" in label_id.lower():\n label_value = 255 # ignored during evaluation\n else:\n label_value = 1 # target\n\n cv2.polylines(mask, np.array([points], dtype=np.int32), True, label_value, 1)\n cv2.fillPoly(mask, np.array([points], dtype=np.int32), label_value)\n\n return mask, 
comments, is_sentence" }, { "identifier": "REFER", "path": "VisualSearch/utils/refer.py", "snippet": "class REFER:\n def __init__(self, data_root, dataset=\"refcoco\", splitBy=\"unc\"):\n # provide data_root folder which contains refclef, refcoco, refcoco+ and refcocog\n # also provide dataset name and splitBy information\n # e.g., dataset = 'refcoco', splitBy = 'unc'\n print(\"loading dataset %s into memory...\" % dataset)\n self.ROOT_DIR = osp.abspath(osp.dirname(__file__))\n self.DATA_DIR = osp.join(data_root, dataset)\n if dataset in [\"refcoco\", \"refcoco+\", \"refcocog\"]:\n self.IMAGE_DIR = osp.join(data_root, \"images/mscoco/images/train2014\")\n elif dataset == \"refclef\":\n self.IMAGE_DIR = osp.join(data_root, \"images/saiapr_tc-12\")\n else:\n print(\"No refer dataset is called [%s]\" % dataset)\n sys.exit()\n\n self.dataset = dataset\n\n # load refs from data/dataset/refs(dataset).json\n tic = time.time()\n\n ref_file = osp.join(self.DATA_DIR, \"refs(\" + splitBy + \").p\")\n print(\"ref_file: \", ref_file)\n self.data = {}\n self.data[\"dataset\"] = dataset\n self.data[\"refs\"] = pickle.load(open(ref_file, \"rb\"))\n\n # load annotations from data/dataset/instances.json\n instances_file = osp.join(self.DATA_DIR, \"instances.json\")\n instances = json.load(open(instances_file, \"rb\"))\n self.data[\"images\"] = instances[\"images\"]\n self.data[\"annotations\"] = instances[\"annotations\"]\n self.data[\"categories\"] = instances[\"categories\"]\n\n # create index\n self.createIndex()\n print(\"DONE (t=%.2fs)\" % (time.time() - tic))\n\n def createIndex(self):\n # create sets of mapping\n # 1) Refs: \t \t{ref_id: ref}\n # 2) Anns: \t \t{ann_id: ann}\n # 3) Imgs:\t\t \t{image_id: image}\n # 4) Cats: \t \t{category_id: category_name}\n # 5) Sents: \t{sent_id: sent}\n # 6) imgToRefs: \t{image_id: refs}\n # 7) imgToAnns: \t{image_id: anns}\n # 8) refToAnn: \t{ref_id: ann}\n # 9) annToRef: \t{ann_id: ref}\n # 10) catToRefs: \t{category_id: refs}\n # 11) sentToRef: \t{sent_id: ref}\n # 12) sentToTokens: {sent_id: tokens}\n print(\"creating index...\")\n # fetch info from instances\n Anns, Imgs, Cats, imgToAnns = {}, {}, {}, {}\n for ann in self.data[\"annotations\"]:\n Anns[ann[\"id\"]] = ann\n imgToAnns[ann[\"image_id\"]] = imgToAnns.get(ann[\"image_id\"], []) + [ann]\n for img in self.data[\"images\"]:\n Imgs[img[\"id\"]] = img\n for cat in self.data[\"categories\"]:\n Cats[cat[\"id\"]] = cat[\"name\"]\n\n # fetch info from refs\n Refs, imgToRefs, refToAnn, annToRef, catToRefs = {}, {}, {}, {}, {}\n Sents, sentToRef, sentToTokens = {}, {}, {}\n for ref in self.data[\"refs\"]:\n # ids\n ref_id = ref[\"ref_id\"]\n ann_id = ref[\"ann_id\"]\n category_id = ref[\"category_id\"]\n image_id = ref[\"image_id\"]\n\n # add mapping related to ref\n Refs[ref_id] = ref\n imgToRefs[image_id] = imgToRefs.get(image_id, []) + [ref]\n catToRefs[category_id] = catToRefs.get(category_id, []) + [ref]\n refToAnn[ref_id] = Anns[ann_id]\n annToRef[ann_id] = ref\n\n # add mapping of sent\n for sent in ref[\"sentences\"]:\n Sents[sent[\"sent_id\"]] = sent\n sentToRef[sent[\"sent_id\"]] = ref\n sentToTokens[sent[\"sent_id\"]] = sent[\"tokens\"]\n\n # create class members\n self.Refs = Refs\n self.Anns = Anns\n self.Imgs = Imgs\n self.Cats = Cats\n self.Sents = Sents\n self.imgToRefs = imgToRefs\n self.imgToAnns = imgToAnns\n self.refToAnn = refToAnn\n self.annToRef = annToRef\n self.catToRefs = catToRefs\n self.sentToRef = sentToRef\n self.sentToTokens = sentToTokens\n print(\"index created.\")\n\n def 
getRefIds(self, image_ids=[], cat_ids=[], ref_ids=[], split=\"\"):\n image_ids = image_ids if type(image_ids) == list else [image_ids]\n cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if len(image_ids) == len(cat_ids) == len(ref_ids) == len(split) == 0:\n refs = self.data[\"refs\"]\n else:\n if not len(image_ids) == 0:\n refs = [self.imgToRefs[image_id] for image_id in image_ids]\n else:\n refs = self.data[\"refs\"]\n if not len(cat_ids) == 0:\n refs = [ref for ref in refs if ref[\"category_id\"] in cat_ids]\n if not len(ref_ids) == 0:\n refs = [ref for ref in refs if ref[\"ref_id\"] in ref_ids]\n if not len(split) == 0:\n if split in [\"testA\", \"testB\", \"testC\"]:\n refs = [\n ref for ref in refs if split[-1] in ref[\"split\"]\n ] # we also consider testAB, testBC, ...\n elif split in [\"testAB\", \"testBC\", \"testAC\"]:\n refs = [\n ref for ref in refs if ref[\"split\"] == split\n ] # rarely used I guess...\n elif split == \"test\":\n refs = [ref for ref in refs if \"test\" in ref[\"split\"]]\n elif split == \"train\" or split == \"val\":\n refs = [ref for ref in refs if ref[\"split\"] == split]\n else:\n print(\"No such split [%s]\" % split)\n sys.exit()\n ref_ids = [ref[\"ref_id\"] for ref in refs]\n return ref_ids\n\n def getAnnIds(self, image_ids=[], cat_ids=[], ref_ids=[]):\n image_ids = image_ids if type(image_ids) == list else [image_ids]\n cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if len(image_ids) == len(cat_ids) == len(ref_ids) == 0:\n ann_ids = [ann[\"id\"] for ann in self.data[\"annotations\"]]\n else:\n if not len(image_ids) == 0:\n lists = [\n self.imgToAnns[image_id]\n for image_id in image_ids\n if image_id in self.imgToAnns\n ] # list of [anns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.data[\"annotations\"]\n if not len(cat_ids) == 0:\n anns = [ann for ann in anns if ann[\"category_id\"] in cat_ids]\n ann_ids = [ann[\"id\"] for ann in anns]\n if not len(ref_ids) == 0:\n ids = set(ann_ids).intersection(\n set([self.Refs[ref_id][\"ann_id\"] for ref_id in ref_ids])\n )\n return ann_ids\n\n def getImgIds(self, ref_ids=[]):\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if not len(ref_ids) == 0:\n image_ids = list(set([self.Refs[ref_id][\"image_id\"] for ref_id in ref_ids]))\n else:\n image_ids = self.Imgs.keys()\n return image_ids\n\n def getCatIds(self):\n return self.Cats.keys()\n\n def loadRefs(self, ref_ids=[]):\n if type(ref_ids) == list:\n return [self.Refs[ref_id] for ref_id in ref_ids]\n elif type(ref_ids) == int:\n return [self.Refs[ref_ids]]\n\n def loadAnns(self, ann_ids=[]):\n if type(ann_ids) == list:\n return [self.Anns[ann_id] for ann_id in ann_ids]\n elif type(ann_ids) == int or type(ann_ids) == unicode:\n return [self.Anns[ann_ids]]\n\n def loadImgs(self, image_ids=[]):\n if type(image_ids) == list:\n return [self.Imgs[image_id] for image_id in image_ids]\n elif type(image_ids) == int:\n return [self.Imgs[image_ids]]\n\n def loadCats(self, cat_ids=[]):\n if type(cat_ids) == list:\n return [self.Cats[cat_id] for cat_id in cat_ids]\n elif type(cat_ids) == int:\n return [self.Cats[cat_ids]]\n\n def getRefBox(self, ref_id):\n ref = self.Refs[ref_id]\n ann = self.refToAnn[ref_id]\n return ann[\"bbox\"] # [x, y, w, h]\n\n def showRef(self, ref, seg_box=\"seg\"):\n ax = plt.gca()\n # show image\n image = self.Imgs[ref[\"image_id\"]]\n I = 
io.imread(osp.join(self.IMAGE_DIR, image[\"file_name\"]))\n ax.imshow(I)\n # show refer expression\n for sid, sent in enumerate(ref[\"sentences\"]):\n print(\"%s. %s\" % (sid + 1, sent[\"sent\"]))\n # show segmentations\n if seg_box == \"seg\":\n ann_id = ref[\"ann_id\"]\n ann = self.Anns[ann_id]\n polygons = []\n color = []\n c = \"none\"\n if type(ann[\"segmentation\"][0]) == list:\n # polygon used for refcoco*\n for seg in ann[\"segmentation\"]:\n poly = np.array(seg).reshape((len(seg) / 2, 2))\n polygons.append(Polygon(poly, True, alpha=0.4))\n color.append(c)\n p = PatchCollection(\n polygons,\n facecolors=color,\n edgecolors=(1, 1, 0, 0),\n linewidths=3,\n alpha=1,\n )\n ax.add_collection(p) # thick yellow polygon\n p = PatchCollection(\n polygons,\n facecolors=color,\n edgecolors=(1, 0, 0, 0),\n linewidths=1,\n alpha=1,\n )\n ax.add_collection(p) # thin red polygon\n else:\n # mask used for refclef\n rle = ann[\"segmentation\"]\n m = mask.decode(rle)\n img = np.ones((m.shape[0], m.shape[1], 3))\n color_mask = np.array([2.0, 166.0, 101.0]) / 255\n for i in range(3):\n img[:, :, i] = color_mask[i]\n ax.imshow(np.dstack((img, m * 0.5)))\n # show bounding-box\n elif seg_box == \"box\":\n ann_id = ref[\"ann_id\"]\n ann = self.Anns[ann_id]\n bbox = self.getRefBox(ref[\"ref_id\"])\n box_plot = Rectangle(\n (bbox[0], bbox[1]),\n bbox[2],\n bbox[3],\n fill=False,\n edgecolor=\"green\",\n linewidth=3,\n )\n ax.add_patch(box_plot)\n\n def getMask(self, ref):\n # return mask, area and mask-center\n ann = self.refToAnn[ref[\"ref_id\"]]\n image = self.Imgs[ref[\"image_id\"]]\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(ann[\"segmentation\"], image[\"height\"], image[\"width\"])\n else:\n rle = ann[\"segmentation\"]\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n # compute area\n area = sum(mask.area(rle)) # should be close to ann['area']\n return {\"mask\": m, \"area\": area}\n # # position\n # position_x = np.mean(np.where(m==1)[1]) # [1] means columns (matlab style) -> x (c style)\n # position_y = np.mean(np.where(m==1)[0]) # [0] means rows (matlab style) -> y (c style)\n # # mass position (if there were multiple regions, we use the largest one.)\n # label_m = label(m, connectivity=m.ndim)\n # regions = regionprops(label_m)\n # if len(regions) > 0:\n # \tlargest_id = np.argmax(np.array([props.filled_area for props in regions]))\n # \tlargest_props = regions[largest_id]\n # \tmass_y, mass_x = largest_props.centroid\n # else:\n # \tmass_x, mass_y = position_x, position_y\n # # if centroid is not in mask, we find the closest point to it from mask\n # if m[mass_y, mass_x] != 1:\n # \tprint('Finding closes mask point ...')\n # \tkernel = np.ones((10, 10),np.uint8)\n # \tme = cv2.erode(m, kernel, iterations = 1)\n # \tpoints = zip(np.where(me == 1)[0].tolist(), np.where(me == 1)[1].tolist()) # row, col style\n # \tpoints = np.array(points)\n # \tdist = np.sum((points - (mass_y, mass_x))**2, axis=1)\n # \tid = np.argsort(dist)[0]\n # \tmass_y, mass_x = points[id]\n # \t# return\n # return {'mask': m, 'area': area, 'position_x': position_x, 'position_y': position_y, 'mass_x': mass_x, 'mass_y': mass_y}\n # # show image and mask\n # I = io.imread(osp.join(self.IMAGE_DIR, image['file_name']))\n # plt.figure()\n # plt.imshow(I)\n # ax = plt.gca()\n # img = np.ones( (m.shape[0], m.shape[1], 3) )\n # color_mask = np.array([2.0,166.0,101.0])/255\n 
# for i in range(3):\n # img[:,:,i] = color_mask[i]\n # ax.imshow(np.dstack( (img, m*0.5) ))\n # plt.show()\n\n def showMask(self, ref):\n M = self.getMask(ref)\n msk = M[\"mask\"]\n ax = plt.gca()\n ax.imshow(msk)" }, { "identifier": "ReferSegDataset", "path": "VisualSearch/utils/refer_seg_dataset.py", "snippet": "class ReferSegDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n refer_seg_data=\"refclef||refcoco||refcoco+||refcocog\",\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n DATA_DIR = os.path.join(base_dir, \"refer_seg\")\n self.refer_seg_ds_list = refer_seg_data.split(\n \"||\"\n ) # ['refclef', 'refcoco', 'refcoco+', 'refcocog']\n self.refer_seg_data = {}\n for ds in self.refer_seg_ds_list:\n if ds == \"refcocog\":\n splitBy = \"umd\"\n else:\n splitBy = \"unc\"\n\n if ds == \"grefcoco\":\n refer_api = G_REFER(DATA_DIR, ds, splitBy)\n else:\n refer_api = REFER(DATA_DIR, ds, splitBy)\n ref_ids_train = refer_api.getRefIds(split=\"train\")\n images_ids_train = refer_api.getImgIds(ref_ids=ref_ids_train)\n refs_train = refer_api.loadRefs(ref_ids=ref_ids_train)\n\n refer_seg_ds = {}\n refer_seg_ds[\"images\"] = []\n loaded_images = refer_api.loadImgs(image_ids=images_ids_train)\n\n for item in loaded_images:\n item = item.copy()\n if ds == \"refclef\":\n item[\"file_name\"] = os.path.join(\n DATA_DIR, \"images/saiapr_tc-12\", item[\"file_name\"]\n )\n else:\n item[\"file_name\"] = os.path.join(\n DATA_DIR, \"images/mscoco/images/train2014\", item[\"file_name\"]\n )\n refer_seg_ds[\"images\"].append(item)\n refer_seg_ds[\"annotations\"] = refer_api.Anns # anns_train\n\n print(\n \"dataset {} (refs {}) (train split) has {} images and {} annotations.\".format(\n ds,\n splitBy,\n len(refer_seg_ds[\"images\"]),\n len(refer_seg_ds[\"annotations\"]),\n )\n )\n\n img2refs = {}\n for ref in refs_train:\n image_id = ref[\"image_id\"]\n img2refs[image_id] = img2refs.get(image_id, []) + [\n ref,\n ]\n refer_seg_ds[\"img2refs\"] = img2refs\n self.refer_seg_data[ds] = refer_seg_ds\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n ds = random.randint(0, len(self.refer_seg_ds_list) - 1)\n ds = self.refer_seg_ds_list[ds]\n refer_seg_ds = self.refer_seg_data[ds]\n images = refer_seg_ds[\"images\"]\n annotations = refer_seg_ds[\"annotations\"]\n img2refs = refer_seg_ds[\"img2refs\"]\n idx = random.randint(0, len(images) - 1)\n image_info = images[idx]\n image_path = 
image_info[\"file_name\"]\n image_id = image_info[\"id\"]\n refs = img2refs[image_id]\n if len(refs) == 0:\n return self.__getitem__(0)\n\n sents = []\n ann_ids = []\n for ref in refs:\n for sent in ref[\"sentences\"]:\n text = sent[\"sent\"]\n sents.append(text)\n ann_ids.append(ref[\"ann_id\"])\n if len(sents) >= self.num_classes_per_sample:\n sampled_inds = np.random.choice(\n list(range(len(sents))), size=self.num_classes_per_sample, replace=False\n )\n else:\n sampled_inds = list(range(len(sents)))\n sampled_sents = np.vectorize(sents.__getitem__)(sampled_inds).tolist()\n # sampled_ann_ids = np.vectorize(ann_ids.__getitem__)(sampled_inds).tolist()\n sampled_ann_ids = [ann_ids[ind] for ind in sampled_inds]\n sampled_classes = sampled_sents\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n questions = []\n answers = []\n for text in sampled_classes:\n text = text.strip()\n assert len(text.split(\"||\")) == 1\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n answers.append(random.choice(self.answer_list))\n\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n\n flag = False\n masks = []\n bboxes_labels = []\n for ann_id in sampled_ann_ids:\n if isinstance(ann_id, list):\n assert False\n flag = True\n if -1 in ann_id:\n assert len(ann_id) == 1\n m = np.zeros((image_info[\"height\"], image_info[\"width\"])).astype(\n np.uint8\n )\n else:\n m_final = np.zeros(\n (image_info[\"height\"], image_info[\"width\"])\n ).astype(np.uint8)\n for ann_id_i in ann_id:\n ann = annotations[ann_id_i]\n\n if len(ann[\"segmentation\"]) == 0:\n m = np.zeros(\n (image_info[\"height\"], image_info[\"width\"])\n ).astype(np.uint8)\n else:\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(\n ann[\"segmentation\"],\n image_info[\"height\"],\n image_info[\"width\"],\n )\n else:\n rle = ann[\"segmentation\"]\n for i in range(len(rle)):\n if not isinstance(rle[i][\"counts\"], bytes):\n rle[i][\"counts\"] = rle[i][\"counts\"].encode()\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n m_final = m_final | m\n m = m_final\n masks.append(m)\n continue\n \n ann = annotations[ann_id]\n cur_bboxes = [ann['bbox']]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if 
len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n \n if len(ann[\"segmentation\"]) == 0:\n m = np.zeros((image_info[\"height\"], image_info[\"width\"])).astype(\n np.uint8\n )\n masks.append(m)\n continue\n\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(\n ann[\"segmentation\"], image_info[\"height\"], image_info[\"width\"]\n )\n else:\n rle = ann[\"segmentation\"]\n for i in range(len(rle)):\n if not isinstance(rle[i][\"counts\"], bytes):\n rle[i][\"counts\"] = rle[i][\"counts\"].encode()\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n masks.append(m)\n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [1]*len(bboxes_labels)\n masks = np.stack(masks, axis=0)\n\n\n masks = torch.from_numpy(masks)\n label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "SegDetDataset", "path": "VisualSearch/utils/general_segdet_dataset.py", "snippet": "class SegDetDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n general_segdet_data=\"objects365||cocostuff||paco_lvis\",\n general_segdet_sample_rate=[2,1,1]\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n self.data2list = {}\n self.data2classes = {}\n\n self.general_segdet_datas = general_segdet_data.split(\"||\")\n num_images = []\n for ds in self.general_segdet_datas:\n if ds == \"cocostuff\":\n classes, images, labels, bboxes = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, labels, bboxes)\n elif ds == \"objects365\":\n classes, images, bboxes = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, bboxes)\n else:\n classes, images, labels = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, labels)\n self.data2classes[ds] = classes\n num_images.append(len(images))\n sample_rate = np.array(general_segdet_sample_rate)\n self.sample_rate = sample_rate / sample_rate.sum()\n\n if \"cocostuff\" in self.general_segdet_datas:\n self.cocostuff_class2index = {\n c: i for i, c in enumerate(self.data2classes[\"cocostuff\"])\n }\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n 
ds = np.random.choice(list(range(len(self.general_segdet_datas))), p=self.sample_rate)\n ds = self.general_segdet_datas[ds]\n\n if ds in [\"paco_lvis\"]:\n class_map = self.data2classes[ds]\n img_ids, coco_api = self.data2list[ds]\n idx = random.randint(0, len(img_ids) - 1)\n img_id = img_ids[idx]\n image_info = coco_api.loadImgs([img_id])[0]\n file_name = image_info[\"file_name\"]\n if ds == \"pascal_part\":\n file_name = os.path.join(\n \"VOCdevkit\", \"VOC2010\", \"JPEGImages\", file_name\n )\n image_path = os.path.join(self.base_dir, \"vlpart\", ds, file_name)\n elif ds == \"paco_lvis\":\n image_path = os.path.join(self.base_dir, \"coco2017\", file_name)\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n annIds = coco_api.getAnnIds(imgIds=image_info[\"id\"])\n anns = coco_api.loadAnns(annIds)\n anns_category2instances = dict()\n for ann in anns:\n category_id = ann['category_id']\n if category_id not in anns_category2instances:\n anns_category2instances[category_id] = []\n anns_category2instances[category_id].append(ann)\n if len(anns_category2instances) == 0:\n return self.__getitem__(0)\n if len(anns_category2instances) >= self.num_classes_per_sample:\n sampled_anns = np.random.choice(\n list(anns_category2instances.keys()), size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_anns = list(anns_category2instances.keys())\n sampled_classes = []\n for category_id in sampled_anns:\n sampled_cls = class_map[category_id]\n if isinstance(sampled_cls, tuple):\n obj, part = sampled_cls\n if random.random() < 0.5:\n name = obj + \" \" + part\n else:\n name = \"the {} of the {}\".format(part, obj)\n else:\n name = sampled_cls\n name = name.replace('_', ' ')\n sampled_classes.append(name)\n\n elif ds in [\"cocostuff\"]:\n image, labels, bboxes_all = self.data2list[ds]\n idx = random.randint(0, len(image) - 1)\n image_path = image[idx]\n label_path = labels[idx]\n bboxes = bboxes_all[idx]\n label = Image.open(label_path)\n label = np.array(label)\n if ds == \"ade20k\":\n label[label == 0] = 255\n label -= 1\n label[label == 254] = 255\n elif ds == \"cocostuff\":\n for c, i in self.cocostuff_class2index.items():\n if \"-\" in c:\n label[label == i] = 255\n img = cv2.imread(image_path)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n unique_label = np.unique(label).tolist()\n if 255 in unique_label:\n unique_label.remove(255)\n if len(unique_label) == 0:\n return self.__getitem__(0)\n\n classes = [self.data2classes[ds][class_id] for class_id in unique_label]\n if len(classes) >= self.num_classes_per_sample:\n sampled_classes = np.random.choice(\n classes, size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_classes = classes\n\n elif ds in 
['objects365']:\n image, bboxes_all = self.data2list[ds]\n idx = random.randint(0, len(image) - 1)\n image_path = image[idx]\n bboxes = bboxes_all[idx]\n img = cv2.imread(image_path)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n unique_label = set()\n for bbox_info in bboxes:\n unique_label.add(bbox_info['category_id'])\n unique_label = list(unique_label)\n if len(unique_label) == 0:\n return self.__getitem__(0)\n\n classes = [self.data2classes[ds][class_id] for class_id in unique_label]\n if len(classes) >= self.num_classes_per_sample:\n sampled_classes = np.random.choice(\n classes, size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_classes = classes\n\n\n questions = []\n answers = []\n class_ids = []\n bboxes_labels = []\n for i, sampled_cls in enumerate(sampled_classes):\n text = sampled_cls\n if ds in ['objects365']:\n text = random.sample(text.split('/'), 1)[0]\n \n assert len(text.split(\"||\")) == 1\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n\n answers.append(random.choice(self.answer_list))\n\n if ds in [\"paco_lvis\", \"pascal_part\"]:\n category_id = sampled_anns[i]\n cur_bboxes = [instance['bbox'] for instance in anns_category2instances[category_id]]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n continue\n\n class_id = self.data2classes[ds].tolist().index(sampled_cls)\n class_ids.append(class_id)\n if ds in ['objects365']:\n cur_bboxes = [bbox['bbox'] for bbox in bboxes if bbox['category_id'] == class_id]\n else:\n cur_bboxes = [bbox['bbox'] for bbox in bboxes if bbox['category_id']-1 == class_id]\n cur_bboxes = cur_bboxes[:100]\n assert len(cur_bboxes) > 0\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [1]*len(bboxes_labels)\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n 
conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n\n if ds in [\"paco_lvis\", \"pascal_part\"]:\n masks = []\n for category_id in sampled_anns:\n try:\n cur_anns = anns_category2instances[category_id]\n cur_mask = None\n for ann in cur_anns:\n if cur_mask is None:\n cur_mask = coco_api.annToMask(ann)\n else:\n cur_mask = cur_mask | coco_api.annToMask(ann)\n assert cur_mask is not None\n masks.append(cur_mask)\n except Exception as e:\n print(e)\n return self.__getitem__(0)\n\n masks = np.stack(masks, axis=0)\n masks = torch.from_numpy(masks)\n label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label\n elif ds in ['objects365']:\n masks = torch.rand(len(bboxes_labels), *original_size)\n label = torch.ones(original_size) * self.ignore_label\n masks_valid = [0]*len(bboxes_labels)\n else:\n label = torch.from_numpy(label).long()\n masks = []\n for class_id in class_ids:\n masks.append(label == class_id)\n masks = torch.stack(masks, dim=0)\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "MixedGroundingDataset", "path": "VisualSearch/utils/mixed_grounding_dataset.py", "snippet": "class MixedGroundingDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n ):\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n with open(os.path.join(base_dir, 'MixedGrounding', 'goldG_train.json')) as f:\n self.images = json.load(f)\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n\n idx = random.randint(0, len(self.images) - 1)\n image_info = self.images[idx]\n image_data_source = image_info['data_source']\n file_name = image_info[\"file_name\"]\n assert image_data_source in ['coco', 'vg', 'flickr']\n if image_data_source == 'coco':\n image_path = os.path.join(self.base_dir, 'coco2014/train2014', file_name)\n elif image_data_source == 'vg':\n image_path = os.path.join(self.base_dir, 'MixedGrounding/GQA/images', file_name)\n else:\n image_path = os.path.join(self.base_dir, 'MixedGrounding/flickr30k-images', file_name)\n caption = image_info['caption']\n instances = image_info['instances']\n if len(instances) == 0:\n return self.__getitem__(0)\n\n if len(instances) >= self.num_classes_per_sample:\n sampled_inds = np.random.choice(\n list(range(len(instances))), 
size=self.num_classes_per_sample, replace=False\n )\n else:\n sampled_inds = list(range(len(instances)))\n\n sampled_classes = sampled_inds\n \n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n questions = []\n answers = []\n bboxes_labels = []\n for sample_ind in sampled_inds:\n text = []\n tokens_positive = instances[sample_ind]['tokens_positive']\n for token in tokens_positive:\n text.append(caption[token[0]:token[1]])\n text = \" \".join(text)\n text = text.strip()\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n answers.append(random.choice(self.answer_list))\n\n cur_bboxes = [instances[sample_ind]['bbox']]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n \n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [0]*len(bboxes_labels)\n masks = torch.rand(len(bboxes_labels), *original_size)\n label = torch.ones(original_size) * self.ignore_label\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "VQADataset", "path": "VisualSearch/utils/vqa_dataset.py", "snippet": "class VQADataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_image_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n vqa_data=\"possible_locations_conv_86k||llava_instruct_150k\",\n vqa_sample_rate=[2,1],\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_image_dir = base_image_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n DATA_DIR = os.path.join(base_image_dir, \"vsm_vqa_data\")\n self.vqa_image_root = os.path.join(base_image_dir, \"coco2017/train2017\")\n 
vqa_datas = vqa_data.split(\"||\")\n self.vqa_datas = []\n for data in vqa_datas:\n with open(os.path.join(DATA_DIR, \"{}.json\".format(data))) as f:\n data = json.load(f)\n self.vqa_datas.append(data)\n sample_rate = np.array(vqa_sample_rate)\n self.sample_rate = sample_rate / sample_rate.sum()\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n ds = np.random.choice(list(range(len(self.vqa_datas))), p=self.sample_rate)\n ds = self.vqa_datas[ds]\n idx = random.randint(0, len(ds) - 1)\n item = ds[idx]\n image_path = os.path.join(self.vqa_image_root, item[\"image\"])\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n ori_size = image.shape[:2]\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n conv = conversation_lib.default_conversation.copy()\n source = item[\"conversations\"]\n source = preprocess_multimodal(\n copy.deepcopy(source),\n mm_use_im_start_end=conv.sep_style == conversation_lib.SeparatorStyle.TWO,\n )\n roles = {\"human\": conv.roles[0], \"gpt\": conv.roles[1]}\n conversations = []\n if roles[source[0][\"from\"]] != conv.roles[0]:\n # Skip the first one if it is not from human\n source = source[1:]\n conv.messages = []\n for j, sentence in enumerate(source):\n role = roles[sentence[\"from\"]]\n assert role == conv.roles[j % 2], f\"{j}\"\n conv.append_message(role, sentence[\"value\"])\n conversations.append(conv.get_prompt())\n\n questions = conversations\n sampled_classes = conversations\n\n masks = torch.rand(1, *ori_size)\n label = torch.ones(ori_size) * self.ignore_label\n bboxes_labels = [torch.tensor([[0.5,0.5,1.0,1.0]])]\n bboxes_valid = [0]\n masks_valid = [0]\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "DEFAULT_IM_END_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\"" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "box_xyxy_to_cxcywh", "path": "VisualSearch/utils/utils.py", "snippet": "def box_xyxy_to_cxcywh(x):\n x0, y0, x1, y1 = x.unbind(-1)\n b = [(x0 + x1) / 2, (y0 + y1) / 2,\n (x1 - x0), (y1 - y0)]\n return torch.stack(b, dim=-1)" }, { "identifier": "expand2square", "path": "VisualSearch/utils/utils.py", "snippet": "def expand2square(pil_img, background_color):\n width, height = pil_img.size\n if width == height:\n return pil_img\n elif width > height:\n result = Image.new(pil_img.mode, (width, width), background_color)\n result.paste(pil_img, (0, 0))\n return result\n else:\n result = Image.new(pil_img.mode, (height, height), background_color)\n result.paste(pil_img, (0, 
0))\n return result" } ]
import glob import os import random import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image from pycocotools import mask from transformers import CLIPImageProcessor from transformers import OwlViTProcessor from VisualSearch.model.llava import conversation as conversation_lib from VisualSearch.model.llava.constants import (DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, IMAGE_TOKEN_INDEX) from VisualSearch.model.llava.mm_utils import tokenizer_image_token from VisualSearch.utils.data_processing import get_mask_from_json from VisualSearch.utils.refer import REFER from VisualSearch.utils.refer_seg_dataset import ReferSegDataset from VisualSearch.utils.general_segdet_dataset import SegDetDataset from VisualSearch.utils.mixed_grounding_dataset import MixedGroundingDataset from VisualSearch.utils.vqa_dataset import VQADataset from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN) from VisualSearch.utils.utils import box_xyxy_to_cxcywh, expand2square
16,370
elif len(splits) == 3: self.base_dir = os.path.join(self.base_dir, 'refer_seg') ds, splitBy, split = splits refer_api = REFER(self.base_dir, ds, splitBy) ref_ids_val = refer_api.getRefIds(split=split) images_ids_val = refer_api.getImgIds(ref_ids=ref_ids_val) refs_val = refer_api.loadRefs(ref_ids=ref_ids_val) refer_seg_ds = {} refer_seg_ds["images"] = [] loaded_images = refer_api.loadImgs(image_ids=images_ids_val) for item in loaded_images: item = item.copy() if ds == "refclef": item["file_name"] = os.path.join( self.base_dir, "images/saiapr_tc-12", item["file_name"] ) elif ds in ["refcoco", "refcoco+", "refcocog", "grefcoco"]: item["file_name"] = os.path.join( self.base_dir, "images/mscoco/images/train2014", item["file_name"], ) refer_seg_ds["images"].append(item) refer_seg_ds["annotations"] = refer_api.Anns # anns_val img2refs = {} for ref in refs_val: image_id = ref["image_id"] img2refs[image_id] = img2refs.get(image_id, []) + [ ref, ] refer_seg_ds["img2refs"] = img2refs self.refer_seg_ds = refer_seg_ds self.data_type = "refer_seg" self.ds = ds self.tokenizer = tokenizer self.transform = OwlViTProcessor.from_pretrained("google/owlvit-base-patch16") self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower) def __len__(self): if self.data_type == "refer_seg": return len(self.refer_seg_ds["images"]) else: return len(self.images) def preprocess(self, x: torch.Tensor) -> torch.Tensor: """Normalize pixel values and pad to a square input.""" # Normalize colors x = (x - self.pixel_mean) / self.pixel_std # Pad h, w = x.shape[-2:] padh = self.img_size - h padw = self.img_size - w x = F.pad(x, (0, padw, 0, padh)) return x def __getitem__(self, idx): if self.data_type == "refer_seg": refer_seg_ds = self.refer_seg_ds images = refer_seg_ds["images"] annotations = refer_seg_ds["annotations"] img2refs = refer_seg_ds["img2refs"] image_info = images[idx] image_path = image_info["file_name"] image_id = image_info["id"] refs = img2refs[image_id] if len(refs) == 0: raise ValueError("image {} has no refs".format(image_id)) sents = [] ann_ids = [] for ref in refs: for sent in ref["sentences"]: sents.append(sent["sent"].strip().lower()) ann_ids.append(ref["ann_id"]) sampled_sents = sents sampled_ann_ids = ann_ids image = cv2.imread(image_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) is_sentence = False else: image_path = self.images[idx] image = cv2.imread(image_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) json_path = image_path.replace(".jpg", ".json") mask_json, sampled_sents, is_sentence = get_mask_from_json(json_path, image) sampled_sents = [sampled_sents[0]] conversations = [] conv = conversation_lib.default_conversation.copy() i = 0 while i < len(sampled_sents): conv.messages = [] text = sampled_sents[i].strip() if is_sentence: conv.append_message( conv.roles[0], DEFAULT_IMAGE_TOKEN + "\n {} Please output segmentation mask.".format(text), ) conv.append_message(conv.roles[1], "[LOC].") else: conv.append_message( conv.roles[0], DEFAULT_IMAGE_TOKEN + "\n Please locate the {} in this image.".format( text ), ) conv.append_message(conv.roles[1], "Sure, [LOC].") conversations.append(conv.get_prompt()) i += 1 # preprocess image for clip image_clip = self.clip_image_processor.preprocess(
cv2.setNumThreads(1) def collate_fn( batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1 ): image_path_list = [] images_list = [] images_clip_list = [] conversation_list = [] masks_list = [] label_list = [] bboxes_labels_list = [] bboxes_valid_list = [] masks_valid_list = [] resize_list = [] questions_list = [] sampled_classes_list = [] offset_list = [0] cnt = 0 inferences = [] for ( image_path, images, images_clip, conversations, masks, label, bboxes_labels, bboxes_valid, masks_valid, resize, questions, sampled_classes, inference, ) in batch: image_path_list.append(image_path) images_list.append(images) images_clip_list.append(images_clip) conversation_list.extend(conversations) label_list.append(label) masks_list.append(masks.float()) bboxes_labels_list.extend(bboxes_labels) bboxes_valid_list.extend(bboxes_valid) masks_valid_list.append(torch.tensor(masks_valid)) resize_list.append(resize) questions_list.append(questions) sampled_classes_list.append(sampled_classes) cnt += len(conversations) offset_list.append(cnt) inferences.append(inference) if use_mm_start_end: # replace <image> token for i in range(len(conversation_list)): replace_token = DEFAULT_IMAGE_TOKEN replace_token = ( DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN ) conversation_list[i] = conversation_list[i].replace( DEFAULT_IMAGE_TOKEN, replace_token ) input_ids = [ tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversation_list ] input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id ) attention_masks = input_ids.ne(tokenizer.pad_token_id) for i in range(len(bboxes_valid_list)): bboxes_valid = bboxes_valid_list[i] attention_mask = attention_masks[i] if not bboxes_valid: attention_mask = attention_mask & input_ids[i].ne(tokenizer("[LOC]", add_special_tokens=False).input_ids[0]) attention_masks[i] = attention_mask conv = conversation_lib.default_conversation.copy() targets = input_ids.clone() if conv_type == "llava_v1": sep = conv.sep + conv.roles[1] + ": " else: sep = "[/INST] " for conversation, target in zip(conversation_list, targets): total_len = int(target.ne(tokenizer.pad_token_id).sum()) rounds = conversation.split(conv.sep2) cur_len = 1 target[:cur_len] = IGNORE_INDEX for i, rou in enumerate(rounds): if rou == "": break parts = rou.split(sep) # if len(parts) != 2: # break assert len(parts) == 2, (len(parts), rou) parts[0] += sep if DEFAULT_IMAGE_TOKEN in conversation: round_len = len(tokenizer_image_token(rou, tokenizer)) instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2 else: round_len = len(tokenizer(rou).input_ids) instruction_len = len(tokenizer(parts[0]).input_ids) - 2 target[cur_len : cur_len + instruction_len] = IGNORE_INDEX cur_len += round_len target[cur_len:] = IGNORE_INDEX if False: z = target.clone() z = torch.where(z == IGNORE_INDEX, tokenizer.unk_token_id, z) if local_rank == 0: print( "conversation: ", conversation, "tokenizer.decode(z): ", tokenizer.decode(z), ) if cur_len < tokenizer.model_max_length: assert cur_len == total_len if inferences[0] == False: truncate_len = tokenizer.model_max_length - 255 if input_ids.shape[1] > truncate_len: input_ids = input_ids[:, :truncate_len] targets = targets[:, :truncate_len] attention_masks = attention_masks[:, :truncate_len] return { "image_paths": image_path_list, "images": torch.stack(images_list, dim=0), "images_clip": torch.stack(images_clip_list, dim=0), "input_ids": input_ids, "labels": targets, 
"bboxes_labels_list": bboxes_labels_list, "bboxes_valid_list": torch.tensor(bboxes_valid_list), "masks_valid_list": masks_valid_list, "attention_masks": attention_masks, "masks_list": masks_list, "label_list": label_list, "resize_list": resize_list, "offset": torch.LongTensor(offset_list), "questions_list": questions_list, "sampled_classes_list": sampled_classes_list, "inference": inferences[0], "conversation_list": conversation_list, } class HybridDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, samples_per_epoch=500 * 8 * 2 * 10, precision: str = "fp32", num_classes_per_sample: int = 3, exclude_val=False, dataset="general_segdet||refer_seg||vqa||reason_seg", sample_rate=[9, 3, 3, 1], general_segdet_data="objects365||cocostuff||paco_lvis", general_segdet_sample_rate=[2,1,1], refer_seg_data="refclef||refcoco||refcoco+||refcocog", vqa_data="possible_locations_conv_86k||llava_instruct_80k", vqa_sample_rate=[2,1], ): self.exclude_val = exclude_val self.dataset = dataset self.samples_per_epoch = samples_per_epoch self.num_classes_per_sample = num_classes_per_sample sample_rate = np.array(sample_rate) self.sample_rate = sample_rate / sample_rate.sum() self.base_dir = base_dir self.tokenizer = tokenizer self.precision = precision self.datasets = dataset.split("||") self.all_datasets = [] for dataset in self.datasets: if dataset == "general_segdet": self.all_datasets.append( SegDetDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, general_segdet_data, general_segdet_sample_rate, ) ) elif dataset == "refer_seg": self.all_datasets.append( ReferSegDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, refer_seg_data, ) ) elif dataset == "vqa": self.all_datasets.append( VQADataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, vqa_data, vqa_sample_rate, ) ) elif dataset == "mixed_grounding": self.all_datasets.append( MixedGroundingDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, ) ) def __len__(self): return self.samples_per_epoch def __getitem__(self, idx): ind = np.random.choice(list(range(len(self.datasets))), p=self.sample_rate) data = self.all_datasets[ind] inference = False return *data[0], inference class ValDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, val_dataset, ): self.base_dir = base_dir splits = val_dataset.split("|") if len(splits) == 2: ds, split = splits images = glob.glob( os.path.join(self.base_dir, "reason_seg", ds, split, "*.jpg") ) self.images = images self.data_type = "reason_seg" elif len(splits) == 3: self.base_dir = os.path.join(self.base_dir, 'refer_seg') ds, splitBy, split = splits refer_api = REFER(self.base_dir, ds, splitBy) ref_ids_val = refer_api.getRefIds(split=split) images_ids_val = refer_api.getImgIds(ref_ids=ref_ids_val) refs_val = refer_api.loadRefs(ref_ids=ref_ids_val) refer_seg_ds = {} refer_seg_ds["images"] = [] loaded_images = refer_api.loadImgs(image_ids=images_ids_val) for item in loaded_images: item = item.copy() 
if ds == "refclef": item["file_name"] = os.path.join( self.base_dir, "images/saiapr_tc-12", item["file_name"] ) elif ds in ["refcoco", "refcoco+", "refcocog", "grefcoco"]: item["file_name"] = os.path.join( self.base_dir, "images/mscoco/images/train2014", item["file_name"], ) refer_seg_ds["images"].append(item) refer_seg_ds["annotations"] = refer_api.Anns # anns_val img2refs = {} for ref in refs_val: image_id = ref["image_id"] img2refs[image_id] = img2refs.get(image_id, []) + [ ref, ] refer_seg_ds["img2refs"] = img2refs self.refer_seg_ds = refer_seg_ds self.data_type = "refer_seg" self.ds = ds self.tokenizer = tokenizer self.transform = OwlViTProcessor.from_pretrained("google/owlvit-base-patch16") self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower) def __len__(self): if self.data_type == "refer_seg": return len(self.refer_seg_ds["images"]) else: return len(self.images) def preprocess(self, x: torch.Tensor) -> torch.Tensor: """Normalize pixel values and pad to a square input.""" # Normalize colors x = (x - self.pixel_mean) / self.pixel_std # Pad h, w = x.shape[-2:] padh = self.img_size - h padw = self.img_size - w x = F.pad(x, (0, padw, 0, padh)) return x def __getitem__(self, idx): if self.data_type == "refer_seg": refer_seg_ds = self.refer_seg_ds images = refer_seg_ds["images"] annotations = refer_seg_ds["annotations"] img2refs = refer_seg_ds["img2refs"] image_info = images[idx] image_path = image_info["file_name"] image_id = image_info["id"] refs = img2refs[image_id] if len(refs) == 0: raise ValueError("image {} has no refs".format(image_id)) sents = [] ann_ids = [] for ref in refs: for sent in ref["sentences"]: sents.append(sent["sent"].strip().lower()) ann_ids.append(ref["ann_id"]) sampled_sents = sents sampled_ann_ids = ann_ids image = cv2.imread(image_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) is_sentence = False else: image_path = self.images[idx] image = cv2.imread(image_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) json_path = image_path.replace(".jpg", ".json") mask_json, sampled_sents, is_sentence = get_mask_from_json(json_path, image) sampled_sents = [sampled_sents[0]] conversations = [] conv = conversation_lib.default_conversation.copy() i = 0 while i < len(sampled_sents): conv.messages = [] text = sampled_sents[i].strip() if is_sentence: conv.append_message( conv.roles[0], DEFAULT_IMAGE_TOKEN + "\n {} Please output segmentation mask.".format(text), ) conv.append_message(conv.roles[1], "[LOC].") else: conv.append_message( conv.roles[0], DEFAULT_IMAGE_TOKEN + "\n Please locate the {} in this image.".format( text ), ) conv.append_message(conv.roles[1], "Sure, [LOC].") conversations.append(conv.get_prompt()) i += 1 # preprocess image for clip image_clip = self.clip_image_processor.preprocess(
expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors="pt")["pixel_values"][0]
15
2023-12-15 14:58:24+00:00
24k
sinoyou/nelf-pro
nerfstudio/viewer/server/viewer_utils.py
[ { "identifier": "Cameras", "path": "nerfstudio/cameras/cameras.py", "snippet": "class Cameras(TensorDataclass):\n \"\"\"Dataparser outputs for the image dataset and the ray generator.\n\n Note: currently only supports cameras with the same principal points and types. The reason we type\n the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras\n down the line in cases where your batches of camera data don't come from the same cameras.\n\n If a single value is provided, it is broadcasted to all cameras.\n\n Args:\n camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format\n fx: Focal length x\n fy: Focal length y\n cx: Principal point x\n cy: Principal point y\n width: Image width\n height: Image height\n distortion_params: OpenCV 6 radial distortion coefficients\n camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.\n times: Timestamps for each camera\n probe_config: dict config containing the generated probe information (core and basis)\n \"\"\"\n\n camera_to_worlds: TensorType[\"num_cameras\":..., 3, 4]\n fx: TensorType[\"num_cameras\":..., 1]\n fy: TensorType[\"num_cameras\":..., 1]\n cx: TensorType[\"num_cameras\":..., 1]\n cy: TensorType[\"num_cameras\":..., 1]\n width: TensorType[\"num_cameras\":..., 1]\n height: TensorType[\"num_cameras\":..., 1]\n distortion_params: Optional[TensorType[\"num_cameras\":..., 6]]\n camera_type: TensorType[\"num_cameras\":..., 1]\n times: Optional[TensorType[\"num_cameras\":..., 1]]\n image_filenames: Optional[List[str]]\n probe_config: Optional[list]\n\n def __init__(\n self,\n camera_to_worlds: TensorType[\"batch_c2ws\":..., 3, 4],\n fx: Union[TensorType[\"batch_fxs\":..., 1], float],\n fy: Union[TensorType[\"batch_fys\":..., 1], float],\n cx: Union[TensorType[\"batch_cxs\":..., 1], float],\n cy: Union[TensorType[\"batch_cys\":..., 1], float],\n width: Optional[Union[TensorType[\"batch_ws\":..., 1], int]] = None,\n height: Optional[Union[TensorType[\"batch_hs\":..., 1], int]] = None,\n distortion_params: Optional[TensorType[\"batch_dist_params\":..., 6]] = None,\n camera_type: Optional[\n Union[\n TensorType[\"batch_cam_types\":..., 1],\n int,\n List[CameraType],\n CameraType,\n ]\n ] = CameraType.PERSPECTIVE,\n times: Optional[TensorType[\"num_cameras\"]] = None,\n image_filenames: Optional[List[str]] = None,\n probe_config: Optional[list] = None\n ):\n \"\"\"Initializes the Cameras object.\n\n Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]\n (in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or\n TensorType[1] (in the case of the rest of the elements). The dimensions before that are\n considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast\n all the tensors to be the same batch dimension. This means you can use any combination of the\n input types in the function signature and it won't break. 
Your batch size for all tensors\n must be broadcastable to the same size, and the resulting number of batch dimensions will be\n the batch dimension with the largest number of dimensions.\n \"\"\"\n\n # This will notify the tensordataclass that we have a field with more than 1 dimension\n self._field_custom_dimensions = {\"camera_to_worlds\": 2}\n\n self.camera_to_worlds = camera_to_worlds\n\n # fx fy calculation\n self.fx = self._init_get_fc_xy(fx, \"fx\") # @dataclass's post_init will take care of broadcasting\n self.fy = self._init_get_fc_xy(fy, \"fy\") # @dataclass's post_init will take care of broadcasting\n\n # cx cy calculation\n self.cx = self._init_get_fc_xy(cx, \"cx\") # @dataclass's post_init will take care of broadcasting\n self.cy = self._init_get_fc_xy(cy, \"cy\") # @dataclass's post_init will take care of broadcasting\n\n # Distortion Params Calculation:\n self.distortion_params = distortion_params # @dataclass's post_init will take care of broadcasting\n\n # @dataclass's post_init will take care of broadcasting\n self.height = self._init_get_height_width(height, self.cy)\n self.width = self._init_get_height_width(width, self.cx)\n self.camera_type = self._init_get_camera_type(camera_type)\n self.times = self._init_get_times(times)\n \n self.image_filenames = image_filenames\n self.probe_config = probe_config\n if self.probe_config is not None:\n self.probe = Probes(self.camera_to_worlds, self.probe_config)\n else:\n self.probe = None\n \n self.__post_init__() # This will do the dataclass post_init and broadcast all the tensors\n\n def _init_get_fc_xy(self, fc_xy, name):\n \"\"\"\n Parses the input focal length / principle point x or y and returns a tensor of the correct shape\n\n Only needs to make sure that we a 1 in the last dimension if it is a tensor. If it is a float, we\n just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.\n\n Args:\n fc_xy: The focal length / principle point x or y\n name: The name of the variable. 
Used for error messages\n \"\"\"\n if isinstance(fc_xy, float):\n fc_xy = torch.Tensor([fc_xy], device=self.device)\n elif isinstance(fc_xy, torch.Tensor):\n if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:\n fc_xy = fc_xy.unsqueeze(-1)\n fc_xy = fc_xy.to(self.device)\n else:\n raise ValueError(f\"{name} must be a float or tensor, got {type(fc_xy)}\")\n return fc_xy\n\n def _init_get_camera_type(\n self,\n camera_type: Union[\n TensorType[\"batch_cam_types\":..., 1], TensorType[\"batch_cam_types\":...], int, List[CameraType], CameraType\n ],\n ) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"\n Parses the __init__() argument camera_type\n\n Camera Type Calculation:\n If CameraType, convert to int and then to tensor, then broadcast to all cameras\n If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras\n If int, first go to tensor and then broadcast to all cameras\n If tensor, broadcast to all cameras\n\n Args:\n camera_type: camera_type argument from __init__()\n \"\"\"\n if isinstance(camera_type, CameraType):\n camera_type = torch.tensor([camera_type.value], device=self.device)\n elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):\n camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)\n elif isinstance(camera_type, int):\n camera_type = torch.tensor([camera_type], device=self.device)\n elif isinstance(camera_type, torch.Tensor):\n assert not torch.is_floating_point(\n camera_type\n ), f\"camera_type tensor must be of type int, not: {camera_type.dtype}\"\n camera_type = camera_type.to(self.device)\n if camera_type.ndim == 0 or camera_type.shape[-1] != 1:\n camera_type = camera_type.unsqueeze(-1)\n # assert torch.all(\n # camera_type.view(-1)[0] == camera_type\n # ), \"Batched cameras of different camera_types will be allowed in the future.\"\n else:\n raise ValueError(\n 'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor[\"num_cameras\"]. 
\\\n Received: '\n + str(type(camera_type))\n )\n return camera_type\n\n def _init_get_height_width(\n self,\n h_w: Union[TensorType[\"batch_hws\":..., 1], TensorType[\"batch_hws\":...], int, None],\n c_x_y: TensorType[\"batch_cxys\":...],\n ) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"\n Parses the __init__() argument for height or width\n\n Height/Width Calculation:\n If int, first go to tensor and then broadcast to all cameras\n If tensor, broadcast to all cameras\n If none, use cx or cy * 2\n Else raise error\n\n Args:\n h_w: height or width argument from __init__()\n c_x_y: cx or cy for when h_w == None\n \"\"\"\n if isinstance(h_w, int):\n h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)\n elif isinstance(h_w, torch.Tensor):\n assert not torch.is_floating_point(h_w), f\"height and width tensor must be of type int, not: {h_w.dtype}\"\n h_w = h_w.to(torch.int64).to(self.device)\n if h_w.ndim == 0 or h_w.shape[-1] != 1:\n h_w = h_w.unsqueeze(-1)\n # assert torch.all(h_w == h_w.view(-1)[0]), \"Batched cameras of different h, w will be allowed in the future.\"\n elif h_w is None:\n h_w = torch.Tensor((c_x_y * 2).to(torch.int64).to(self.device))\n else:\n raise ValueError(\"Height must be an int, tensor, or None, received: \" + str(type(h_w)))\n return h_w\n\n def _init_get_times(self, times):\n if times is None:\n times = None\n elif isinstance(times, torch.Tensor):\n if times.ndim == 0 or times.shape[-1] != 1:\n times = times.unsqueeze(-1).to(self.device)\n else:\n raise ValueError(f\"times must be None or a tensor, got {type(times)}\")\n\n return times\n\n @property\n def device(self):\n \"\"\"Returns the device that the camera is on.\"\"\"\n return self.camera_to_worlds.device\n\n @property\n def image_height(self) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"Returns the height of the images.\"\"\"\n return self.height\n\n @property\n def image_width(self) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"Returns the height of the images.\"\"\"\n return self.width\n\n @property\n def is_jagged(self):\n \"\"\"\n Returns whether or not the cameras are \"jagged\" (i.e. the height and widths are different, meaning that\n you cannot concatenate the image coordinate maps together)\n \"\"\"\n h_jagged = not torch.all(self.height == self.height.view(-1)[0])\n w_jagged = not torch.all(self.width == self.width.view(-1)[0])\n return h_jagged or w_jagged\n\n def get_image_coords(\n self, pixel_offset: float = 0.5, index: Optional[Tuple] = None\n ) -> TensorType[\"height\", \"width\", 2]:\n \"\"\"This gets the image coordinates of one of the cameras in this object.\n\n If no index is specified, it will return the maximum possible sized height / width image coordinate map,\n by looking at the maximum height and width of all the cameras in this object.\n\n Args:\n pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)\n index: Tuple of indices into the batch dimensions of the camera. 
Defaults to None, which returns the 0th\n flattened camera\n\n Returns:\n Grid of image coordinates.\n \"\"\"\n if index is None:\n image_height = torch.max(self.image_height.view(-1))\n image_width = torch.max(self.image_width.view(-1))\n image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing=\"ij\")\n image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates\n else:\n image_height = self.image_height[index].item()\n image_width = self.image_width[index].item()\n image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing=\"ij\")\n image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates\n return image_coords\n\n def generate_rays( # pylint: disable=too-many-statements\n self,\n camera_indices: Union[TensorType[\"num_rays\":..., \"num_cameras_batch_dims\"], int],\n coords: Optional[TensorType[\"num_rays\":..., 2]] = None,\n camera_opt_to_camera: Optional[TensorType[\"num_rays\":..., 3, 4]] = None,\n distortion_params_delta: Optional[TensorType[\"num_rays\":..., 6]] = None,\n keep_shape: Optional[bool] = None,\n disable_distortion: bool = False,\n ) -> RayBundle:\n \"\"\"Generates rays for the given camera indices.\n\n This function will standardize the input arguments and then call the _generate_rays_from_coords function\n to generate the rays. Our goal is to parse the arguments and then get them into the right shape:\n - camera_indices: (num_rays:..., num_cameras_batch_dims)\n - coords: (num_rays:..., 2)\n - camera_opt_to_camera: (num_rays:..., 3, 4) or None\n - distortion_params_delta: (num_rays:..., 6) or None\n\n Read the docstring for _generate_rays_from_coords for more information on how we generate the rays\n after we have standardized the arguments.\n\n We are only concerned about different combinations of camera_indices and coords matrices, and the following\n are the 4 cases we have to deal with:\n 1. isinstance(camera_indices, int) and coords == None\n - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)\n 2. isinstance(camera_indices, int) and coords != None\n - In this case, we broadcast camera_indices to the same batch dim as coords\n 3. not isinstance(camera_indices, int) and coords == None\n - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast\n all our other args to match the new definition of num_rays := (h, w) + num_rays\n 4. not isinstance(camera_indices, int) and coords != None\n - In this case, we have nothing to do, only check that the arguments are of the correct shape\n\n There is one more edge case we need to be careful with: when we have \"jagged cameras\" (ie: different heights\n and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.\n When coords == None (ie: when we render out the whole image associated with this camera), we run into problems\n since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,\n we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,\n regaurdless of the number of prepended extra batch dimensions in the camera_indices tensor.\n\n\n Args:\n camera_indices: Camera indices of the flattened cameras object to generate rays for.\n coords: Coordinates of the pixels to generate rays for. 
If None, the full image will be rendered.\n camera_opt_to_camera: Optional transform for the camera to world matrices.\n distortion_params_delta: Optional delta for the distortion parameters.\n keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise\n keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the\n camera_indices and coords tensors (if we can).\n disable_distortion: If True, disables distortion.\n\n Returns:\n Rays for the given camera indices and coords.\n \"\"\"\n # Check the argument types to make sure they're valid and all shaped correctly\n assert isinstance(camera_indices, (torch.Tensor, int)), \"camera_indices must be a tensor or int\"\n assert coords is None or isinstance(coords, torch.Tensor), \"coords must be a tensor or None\"\n assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)\n assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)\n if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):\n num_rays_shape = camera_indices.shape[:-1]\n errormsg = \"Batch dims of inputs must match when inputs are all tensors\"\n assert coords.shape[:-1] == num_rays_shape, errormsg\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg\n assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg\n\n # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later\n if not self.shape:\n cameras = self.reshape((1,))\n assert torch.all(\n torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0\n ), \"Can only index into single camera with no batch dimensions if index is zero\"\n else:\n cameras = self\n\n # If the camera indices are an int, then we need to make sure that the camera batch is 1D\n if isinstance(camera_indices, int):\n assert (\n len(cameras.shape) == 1\n ), \"camera_indices must be a tensor if cameras are batched with more than 1 batch dimension\"\n camera_indices = torch.tensor([camera_indices], device=cameras.device)\n\n assert camera_indices.shape[-1] == len(\n cameras.shape\n ), \"camera_indices must have shape (num_rays:..., num_cameras_batch_dims)\"\n\n # If keep_shape is True, then we need to make sure that the camera indices in question\n # are all the same height and width and can actually be batched while maintaining the image\n # shape\n if keep_shape is True:\n assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(\n cameras.width[camera_indices] == cameras.width[camera_indices[0]]\n ), \"Can only keep shape if all cameras have the same height and width\"\n\n # If the cameras don't all have same height / width, if coords is not none, we will need to generate\n # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.\n # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial\n if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):\n index_dim = camera_indices.shape[-1]\n camera_indices = camera_indices.reshape(-1, index_dim)\n _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]\n camera_indices = torch.cat(\n [index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],\n 
)\n coords = torch.cat(_coords, dim=0)\n assert coords.shape[0] == camera_indices.shape[0]\n # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them\n\n # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords\n # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,\n # each image in camera_indices has to have the same shape since otherwise we would have error'd when\n # we checked keep_shape is valid or we aren't jagged.\n if coords is None:\n index_dim = camera_indices.shape[-1]\n index = camera_indices.reshape(-1, index_dim)[0]\n coords: torch.Tensor = cameras.get_image_coords(index=tuple(index)) # (h, w, 2)\n coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,)) # (h, w, 1..., 2)\n coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,)) # (h, w, num_rays, 2)\n camera_opt_to_camera = ( # (h, w, num_rays, 3, 4) or None\n camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))\n if camera_opt_to_camera is not None\n else None\n )\n distortion_params_delta = ( # (h, w, num_rays, 6) or None\n distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))\n if distortion_params_delta is not None\n else None\n )\n\n # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims\n camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)\n\n # Checking our tensors have been standardized\n assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)\n assert camera_indices.shape[-1] == len(cameras.shape)\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]\n assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]\n\n # This will do the actual work of generating the rays now that we have standardized the inputs\n # raybundle.shape == (num_rays) when done\n # pylint: disable=protected-access\n raybundle = cameras._generate_rays_from_coords(\n camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion\n )\n\n # If we have mandated that we don't keep the shape, then we flatten\n if keep_shape is False:\n raybundle = raybundle.flatten()\n\n # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,\n # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour\n # that we haven't caught yet with tests\n return raybundle\n\n # pylint: disable=too-many-statements\n def _generate_rays_from_coords(\n self,\n camera_indices: TensorType[\"num_rays\":..., \"num_cameras_batch_dims\"],\n coords: TensorType[\"num_rays\":..., 2],\n camera_opt_to_camera: Optional[TensorType[\"num_rays\":..., 3, 4]] = None,\n distortion_params_delta: Optional[TensorType[\"num_rays\":..., 6]] = None,\n disable_distortion: bool = False,\n ) -> RayBundle:\n \"\"\"Generates rays for the given camera indices and coords where self isn't jagged\n\n This is a fairly complex function, so let's break this down slowly.\n\n Shapes involved:\n - num_rays: This is your output raybundle shape. 
It dictates the number and shape of the rays generated\n - num_cameras_batch_dims: This is the number of dimensions of our camera\n\n Args:\n camera_indices: Camera indices of the flattened cameras object to generate rays for.\n The shape of this is such that indexing into camera_indices[\"num_rays\":...] will return the\n index into each batch dimension of the camera in order to get the correct camera specified by\n \"num_rays\".\n Example:\n >>> cameras = Cameras(...)\n >>> cameras.shape\n (2, 3, 4)\n >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3\n >>> camera_indices.shape\n (3,)\n >>> coords = torch.tensor([1,1])\n >>> coords.shape\n (2,)\n >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)\n # This will generate a RayBundle with a single ray for the\n # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()\n >>> out_rays.shape\n ()\n >>> camera_indices = torch.tensor([[0,0,0]])\n >>> camera_indices.shape\n (1, 3)\n >>> coords = torch.tensor([[1,1]])\n >>> coords.shape\n (1, 2)\n >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)\n # This will generate a RayBundle with a single ray for the\n # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)\n # since we added an extra dimension in front of camera_indices\n >>> out_rays.shape\n (1,)\n\n If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape\n\n The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the\n output shape and if you index into the output RayBundle with some indices [i:...], if you index into\n camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch\n indices into the original cameras object corresponding to that ray (ie: you will get the camera\n from our batched cameras corresponding to the ray at RayBundle[i:...]).\n\n coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning\n height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will\n get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].\n\n camera_opt_to_camera: Optional transform for the camera to world matrices.\n In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you\n the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].\n\n distortion_params_delta: Optional delta for the distortion parameters.\n In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you\n the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].\n\n disable_distortion: If True, disables distortion.\n\n Returns:\n Rays for the given camera indices and coords. 
RayBundle.shape == num_rays\n \"\"\"\n # Make sure we're on the right devices\n camera_indices = camera_indices.to(self.device)\n coords = coords.to(self.device)\n\n # Checking to make sure everything is of the right shape and type\n num_rays_shape = camera_indices.shape[:-1]\n assert camera_indices.shape == num_rays_shape + (self.ndim,)\n assert coords.shape == num_rays_shape + (2,)\n assert coords.shape[-1] == 2\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)\n assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)\n\n # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all\n # of our output rays at each dimension of our cameras object\n true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]\n\n # Get all our focal lengths, principal points and make sure they are the right shapes\n y = coords[..., 0] # (num_rays,) get rid of the last dimension\n x = coords[..., 1] # (num_rays,) get rid of the last dimension\n fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1) # (num_rays,)\n cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1) # (num_rays,)\n assert (\n y.shape == num_rays_shape\n and x.shape == num_rays_shape\n and fx.shape == num_rays_shape\n and fy.shape == num_rays_shape\n and cx.shape == num_rays_shape\n and cy.shape == num_rays_shape\n ), (\n str(num_rays_shape)\n + str(y.shape)\n + str(x.shape)\n + str(fx.shape)\n + str(fy.shape)\n + str(cx.shape)\n + str(cy.shape)\n )\n\n # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)\n # Also make sure the shapes are correct\n coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1) # (num_rays, 2)\n coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1) # (num_rays, 2)\n coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1) # (num_rays, 2)\n assert (\n coord.shape == num_rays_shape + (2,)\n and coord_x_offset.shape == num_rays_shape + (2,)\n and coord_y_offset.shape == num_rays_shape + (2,)\n )\n\n # Stack image coordinates and image coordinates offset by 1, check shapes too\n coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0) # (3, num_rays, 2)\n assert coord_stack.shape == (3,) + num_rays_shape + (2,)\n\n # Undistorts our images according to our distortion parameters\n if not disable_distortion:\n distortion_params = None\n if self.distortion_params is not None:\n distortion_params = self.distortion_params[true_indices]\n if distortion_params_delta is not None:\n distortion_params = distortion_params + distortion_params_delta\n elif distortion_params_delta is not None:\n distortion_params = distortion_params_delta\n\n # Do not apply distortion for equirectangular images\n if distortion_params is not None:\n mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)\n coord_mask = torch.stack([mask, mask, mask], dim=0)\n if mask.any():\n coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(\n coord_stack[coord_mask, :].reshape(3, -1, 2),\n distortion_params[mask, :],\n ).reshape(-1, 2)\n\n # Make sure after we have undistorted our images, the shapes are still correct\n assert coord_stack.shape == (3,) + num_rays_shape + (2,)\n\n # Gets our directions for all our rays in camera coordinates and checks shapes at the end\n # Here, directions_stack is of shape (3, 
num_rays, 3)\n # directions_stack[0] is the direction for ray in camera coordinates\n # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x\n # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y\n cam_types = torch.unique(self.camera_type, sorted=False)\n directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)\n if CameraType.PERSPECTIVE.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()\n directions_stack[..., 2][mask] = -1.0\n\n if CameraType.FISHEYE.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n\n theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))\n theta = torch.clip(theta, 0.0, math.pi)\n\n sin_theta = torch.sin(theta)\n\n directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()\n directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)\n\n if CameraType.EQUIRECTANGULAR.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n\n # For equirect, fx = fy = height = width/2\n # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2\n theta = -torch.pi * coord_stack[..., 0] # minus sign for right-handed\n phi = torch.pi * (0.5 - coord_stack[..., 1])\n # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)\n directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()\n directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()\n\n for value in cam_types:\n if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:\n raise ValueError(f\"Camera type {value} not supported.\")\n\n assert directions_stack.shape == (3,) + num_rays_shape + (3,)\n\n c2w = self.camera_to_worlds[true_indices]\n assert c2w.shape == num_rays_shape + (3, 4)\n\n if camera_opt_to_camera is not None:\n c2w = pose_utils.multiply(c2w, camera_opt_to_camera)\n rotation = c2w[..., :3, :3] # (..., 3, 3)\n assert rotation.shape == num_rays_shape + (3, 3)\n\n directions_stack = torch.sum(\n directions_stack[..., None, :] * rotation, dim=-1\n ) # (..., 1, 3) * (..., 3, 3) -> (..., 3)\n\n directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)\n directions_norm = directions_norm[0]\n\n directions_stack = normalize(directions_stack, dim=-1)\n assert directions_stack.shape == (3,) + num_rays_shape + (3,)\n\n origins = c2w[..., :3, 3] # (..., 3)\n assert origins.shape == num_rays_shape + (3,)\n\n directions = directions_stack[0]\n assert directions.shape == num_rays_shape + (3,)\n\n # norms of the vector going between adjacent coords, giving us dx and dy per output ray\n dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1)) # (\"num_rays\":...,)\n dy = 
torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1)) # (\"num_rays\":...,)\n assert dx.shape == num_rays_shape and dy.shape == num_rays_shape\n\n pixel_area = (dx * dy)[..., None] # (\"num_rays\":..., 1)\n assert pixel_area.shape == num_rays_shape + (1,)\n\n times = self.times[camera_indices, 0] if self.times is not None else None\n\n\n return RayBundle(\n origins=origins,\n directions=directions,\n pixel_area=pixel_area,\n camera_indices=camera_indices,\n directions_norm=directions_norm,\n times=times,\n probes=self.probe,\n )\n\n def to_json(\n self, camera_idx: int, image: Optional[TensorType[\"height\", \"width\", 2]] = None, max_size: Optional[int] = None\n ) -> Dict:\n \"\"\"Convert a camera to a json dictionary.\n\n Args:\n camera_idx: Index of the camera to convert.\n image: An image in range [0, 1] that is encoded to a base64 string.\n max_size: Max size to resize the image to if present.\n\n Returns:\n A JSON representation of the camera\n \"\"\"\n flattened = self.flatten()\n json_ = {\n \"type\": \"PinholeCamera\",\n \"cx\": flattened[camera_idx].cx.item(),\n \"cy\": flattened[camera_idx].cy.item(),\n \"fx\": flattened[camera_idx].fx.item(),\n \"fy\": flattened[camera_idx].fy.item(),\n \"camera_to_world\": self.camera_to_worlds[camera_idx].tolist(),\n \"camera_index\": camera_idx,\n \"times\": flattened[camera_idx].times.item() if self.times is not None else None,\n }\n if image is not None:\n image_uint8 = (image * 255).detach().type(torch.uint8)\n if max_size is not None:\n image_uint8 = image_uint8.permute(2, 0, 1)\n image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size) # type: ignore\n image_uint8 = image_uint8.permute(1, 2, 0)\n image_uint8 = image_uint8.cpu().numpy()\n data = cv2.imencode(\".jpg\", image_uint8)[1].tobytes()\n json_[\"image\"] = str(\"data:image/jpeg;base64,\" + base64.b64encode(data).decode(\"ascii\"))\n return json_\n\n def get_intrinsics_matrices(self) -> TensorType[\"num_cameras\":..., 3, 3]:\n \"\"\"Returns the intrinsic matrices for each camera.\n\n Returns:\n Pinhole camera intrinsics matrices\n \"\"\"\n K = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)\n K[..., 0, 0] = self.fx.squeeze(-1)\n K[..., 1, 1] = self.fy.squeeze(-1)\n K[..., 0, 2] = self.cx.squeeze(-1)\n K[..., 1, 2] = self.cy.squeeze(-1)\n K[..., 2, 2] = 1.0\n return K\n\n def rescale_output_resolution(\n self,\n scaling_factor: Union[TensorType[\"num_cameras\":...], TensorType[\"num_cameras\":..., 1], float, int],\n round_hw=False,\n ) -> None:\n \"\"\"Rescale the output resolution of the cameras.\n\n Args:\n scaling_factor: Scaling factor to apply to the output resolution.\n round_hw: Whether to round the height and width to the nearest integer.\n \"\"\"\n if isinstance(scaling_factor, (float, int)):\n scaling_factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))\n elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:\n scaling_factor = scaling_factor.unsqueeze(-1)\n elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1):\n pass\n else:\n raise ValueError(\n f\"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}.\"\n )\n\n self.fx = self.fx * scaling_factor\n self.fy = self.fy * scaling_factor\n self.cx = self.cx * scaling_factor\n self.cy = self.cy * scaling_factor\n if not round_hw:\n self.height = (self.height * scaling_factor).to(torch.int64)\n self.width = (self.width * 
scaling_factor).to(torch.int64)\n else:\n self.height = torch.floor(self.height * scaling_factor + 0.5).to(torch.int64)\n self.width = torch.floor(self.width * scaling_factor + 0.5).to(torch.int64)\n\n def get_plotly(self, camera_group):\n\n # define local necssary coordinates for plotting\n num_cameras = self.camera_to_worlds.shape[0]\n _cam_center_c = np.array([[.0, .0, .0]]).repeat(num_cameras, axis=0)\n _cam_forward_c = np.array([[.0, .0, -1.0]]).repeat(num_cameras, axis=0)\n _cam_up_c = np.array([[.0, 1.0, .0]]).repeat(num_cameras, axis=0)\n _cam_right_c = np.array([[1.0, .0, .0]]).repeat(num_cameras, axis=0)\n\n _pyramid_width = self.width.cpu().numpy() / self.fx.cpu().numpy()\n _pyramid_height = self.height.cpu().numpy() / self.fy.cpu().numpy()\n\n _cam_pyramid_ur = np.concatenate([_pyramid_width/2, _pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_dr = np.concatenate([_pyramid_width/2, -_pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_ul = np.concatenate([-_pyramid_width/2, _pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_dl = np.concatenate([-_pyramid_width/2, -_pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n\n _local_coordinates = {\n 'center': _cam_center_c, \n 'forward': _cam_forward_c, \n 'up': _cam_up_c, \n 'right': _cam_right_c, \n 'pyramid_ur': _cam_pyramid_ur, \n 'pyramid_dr': _cam_pyramid_dr, \n 'pyramid_ul': _cam_pyramid_ul, \n 'pyramid_dl': _cam_pyramid_dl, \n }\n\n # transform it into world coordinates\n data = {}\n for k in _local_coordinates.keys():\n _local_coor_homo = np.concatenate([_local_coordinates[k].reshape(-1, 3) * plotly_camera_scale, np.ones((num_cameras, 1))], axis=-1) # num_cam, 4\n _cw = self.camera_to_worlds.cpu().numpy() # num_cam, 3, 4\n\n _homo = np.einsum('ijk,ik->ij', _cw, _local_coor_homo) # num_cam, 3\n data[k] = _homo[:, :3]\n\n plot_data = plot_camera_components(data, image_list=self.image_filenames, camera_group=camera_group)\n \n if isinstance(plot_data, list):\n return plot_data\n else:\n return [plot_data]" }, { "identifier": "RayBundle", "path": "nerfstudio/cameras/rays.py", "snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions: TensorType[..., 3]\n \"\"\"Unit ray direction vector\"\"\"\n pixel_area: TensorType[..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin\"\"\"\n directions_norm: Optional[TensorType[..., 1]] = None\n \"\"\"Norm of ray direction vector before normalization\"\"\"\n camera_indices: Optional[TensorType[..., 1]] = None\n \"\"\"Camera indices\"\"\"\n nears: Optional[TensorType[..., 1]] = None\n \"\"\"Distance along ray to start sampling\"\"\"\n fars: Optional[TensorType[..., 1]] = None\n \"\"\"Rays Distance along ray to stop sampling\"\"\"\n metadata: Optional[Dict[str, TensorType[\"num_rays\", \"latent_dims\"]]] = None\n \"\"\"Additional metadata or data needed for interpolation, will mimic shape of rays\"\"\"\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n probes: Optional[Probes] = None\n \"\"\"Probe Cameras Object. This object doesn't follow the same shape pattern as the other fields. \n Lazy broadcasting is used for preventing CUDA memory overflow. 
\"\"\"\n\n def set_camera_indices(self, camera_index: int) -> None:\n \"\"\"Sets all of the the camera indices to a specific camera index.\n\n Args:\n camera_index: Camera index.\n \"\"\"\n self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index\n\n def __len__(self):\n num_rays = torch.numel(self.origins) // self.origins.shape[-1]\n return num_rays\n\n def sample(self, num_rays: int) -> \"RayBundle\":\n \"\"\"Returns a RayBundle as a subset of rays.\n\n Args:\n num_rays: Number of rays in output RayBundle\n\n Returns:\n RayBundle with subset of rays.\n \"\"\"\n assert num_rays <= len(self)\n indices = random.sample(range(len(self)), k=num_rays)\n return self[indices]\n\n def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> \"RayBundle\":\n \"\"\"Flattens RayBundle and extracts chunk given start and end indicies.\n\n Args:\n start_idx: Start index of RayBundle chunk.\n end_idx: End index of RayBundle chunk.\n\n Returns:\n Flattened RayBundle with end_idx-start_idx rays.\n\n \"\"\"\n return self.flatten()[start_idx:end_idx]\n\n def get_ray_samples(\n self,\n bin_starts: TensorType[\"bs\":..., \"num_samples\", 1],\n bin_ends: TensorType[\"bs\":..., \"num_samples\", 1],\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_to_euclidean_fn: Optional[Callable] = None,\n ) -> RaySamples:\n \"\"\"Produces samples for each ray by projection points along the ray direction. Currently samples uniformly.\n\n Args:\n bin_starts: Distance from origin to start of bin. (in Euclidean space)\n bin_ends: Distance from origin to end of bin. (in Euclidean space)\n spacing_starts: start point in normalized space. [0, 1]\n spacing_ends: end point in normalized space. 
[0, 1]\n\n Returns:\n Samples projected along ray.\n \"\"\"\n deltas = bin_ends - bin_starts\n if self.camera_indices is not None:\n camera_indices = self.camera_indices[..., None]\n else:\n camera_indices = None\n\n shaped_raybundle_fields = self[..., None]\n\n frustums = Frustums(\n origins=shaped_raybundle_fields.origins, # [..., 1, 3]\n directions=shaped_raybundle_fields.directions, # [..., 1, 3]\n starts=bin_starts, # [..., num_samples, 1]\n ends=bin_ends, # [..., num_samples, 1]\n pixel_area=shaped_raybundle_fields.pixel_area, # [..., 1, 1]\n )\n\n ray_samples = RaySamples(\n frustums=frustums,\n camera_indices=camera_indices, # [..., 1, 1]\n deltas=deltas, # [..., num_samples, 1]\n spacing_starts=spacing_starts, # [..., num_samples, 1]\n spacing_ends=spacing_ends, # [..., num_samples, 1]\n spacing_to_euclidean_fn=spacing_to_euclidean_fn,\n metadata=shaped_raybundle_fields.metadata,\n times=None if self.times is None else self.times[..., None], # [..., 1, 1]\n probes=self.probes, # special class, not following the same shape pattern\n )\n\n return ray_samples" }, { "identifier": "base_config", "path": "nerfstudio/configs/base_config.py", "snippet": "CONSOLE = Console(width=120)\nclass PrintableConfig: # pylint: disable=too-few-public-methods\nclass InstantiateConfig(PrintableConfig): # pylint: disable=too-few-public-methods\nclass MachineConfig(PrintableConfig):\nclass LocalWriterConfig(InstantiateConfig):\nclass LoggingConfig(PrintableConfig):\nclass TrainerConfig(PrintableConfig):\nclass ViewerConfig(PrintableConfig):\nclass Config(PrintableConfig):\n def __str__(self):\n def setup(self, **kwargs) -> Any:\n def setup(self, banner_messages: Optional[List[str]] = None, **kwargs) -> Any:\n def is_viewer_enabled(self) -> bool:\n def is_wandb_enabled(self) -> bool:\n def is_tensorboard_enabled(self) -> bool:\n def set_timestamp(self) -> None:\n def set_experiment_name(self) -> None:\n def get_base_dir(self) -> Path:\n def get_checkpoint_dir(self) -> Path:\n def print_to_terminal(self) -> None:\n def save_config(self) -> None:" }, { "identifier": "InputDataset", "path": "nerfstudio/data/datasets/base_dataset.py", "snippet": "class InputDataset(Dataset):\n \"\"\"Dataset that returns images.\n\n Args:\n dataparser_outputs: description of where and how to read input images.\n scale_factor: The scaling factor for the dataparser outputs\n \"\"\"\n\n def __init__(self, dataparser_outputs: DataparserOutputs, scale_factor: float = 1.0):\n super().__init__()\n self._dataparser_outputs = dataparser_outputs\n self.has_masks = dataparser_outputs.mask_filenames is not None\n self.scale_factor = scale_factor\n self.scene_box = deepcopy(dataparser_outputs.scene_box)\n self.metadata = deepcopy(dataparser_outputs.metadata)\n self.cameras = deepcopy(dataparser_outputs.cameras)\n self.cameras.rescale_output_resolution(scaling_factor=scale_factor)\n self.image_cache = {}\n\n def __len__(self):\n return len(self._dataparser_outputs.image_filenames)\n\n def get_numpy_image(self, image_idx: int) -> npt.NDArray[np.uint8]:\n \"\"\"Returns the image of shape (H, W, 3 or 4).\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n image_filename = self._dataparser_outputs.image_filenames[image_idx]\n pil_image = Image.open(image_filename)\n if self.scale_factor != 1.0:\n width, height = pil_image.size\n newsize = (int(width * self.scale_factor), int(height * self.scale_factor))\n pil_image = pil_image.resize(newsize, resample=Image.BILINEAR)\n image = np.array(pil_image, dtype=\"uint8\") # shape is (h, w, 
3 or 4)\n # mask_filename = str(image_filename).replace(\"dense/images\", \"masks\").replace(\".jpg\", \".npy\")\n # mask = np.load(mask_filename)\n # image = image * mask[..., None]\n\n assert len(image.shape) == 3\n assert image.dtype == np.uint8\n assert image.shape[2] in [3, 4], f\"Image shape of {image.shape} is in correct.\"\n return image\n\n def get_image(self, image_idx: int) -> TensorType[\"image_height\", \"image_width\", \"num_channels\"]:\n \"\"\"Returns a 3 channel image.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n image = torch.from_numpy(self.get_numpy_image(image_idx).astype(\"float32\") / 255.0)\n if self._dataparser_outputs.alpha_color is not None and image.shape[-1] == 4:\n assert image.shape[-1] == 4\n image = image[:, :, :3] * image[:, :, -1:] + self._dataparser_outputs.alpha_color * (1.0 - image[:, :, -1:])\n else:\n image = image[:, :, :3]\n return image\n\n def get_data(self, image_idx: int) -> Dict:\n \"\"\"Returns the ImageDataset data as a dictionary.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n if image_idx in self.image_cache:\n image = self.image_cache[image_idx]\n else:\n image = self.get_image(image_idx)\n self.image_cache[image_idx] = image\n\n data = {\"image_idx\": image_idx, 'image_filename': self._dataparser_outputs.image_filenames[image_idx].name}\n data[\"image\"] = image\n for _, data_func_dict in self._dataparser_outputs.additional_inputs.items():\n assert \"func\" in data_func_dict, \"Missing function to process data: specify `func` in `additional_inputs`\"\n func = data_func_dict[\"func\"]\n assert \"kwargs\" in data_func_dict, \"No data to process: specify `kwargs` in `additional_inputs`\"\n data.update(func(image_idx, **data_func_dict[\"kwargs\"]))\n if self.has_masks:\n mask_filepath = self._dataparser_outputs.mask_filenames[image_idx]\n data[\"mask\"] = get_image_mask_tensor_from_path(filepath=mask_filepath, scale_factor=self.scale_factor)\n metadata = self.get_metadata(data)\n data.update(metadata)\n return data\n\n # pylint: disable=no-self-use\n def get_metadata(self, data: Dict) -> Dict:\n \"\"\"Method that can be used to process any additional metadata that may be part of the model inputs.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n del data\n return {}\n\n def __getitem__(self, image_idx: int) -> Dict:\n data = self.get_data(image_idx)\n return data" }, { "identifier": "Model", "path": "nerfstudio/models/base_model.py", "snippet": "class Model(nn.Module):\n \"\"\"Model class\n Where everything (Fields, Optimizers, Samplers, Visualization, etc) is linked together. 
This should be\n subclassed for custom NeRF model.\n\n Args:\n config: configuration for instantiating model\n scene_box: dataset scene box\n \"\"\"\n\n config: ModelConfig\n\n def __init__(\n self,\n config: ModelConfig,\n scene_box: SceneBox,\n num_train_data: int,\n world_size: int = 1,\n local_rank: int = 0,\n load_step: int = None, \n **kwargs,\n ) -> None:\n super().__init__()\n self.config = config\n self.scene_box = scene_box\n self.num_train_data = num_train_data\n self.kwargs = kwargs\n self.collider = None\n self.world_size = world_size\n self.local_rank = local_rank\n self.load_step = load_step\n\n self.populate_modules() # populate the modules\n self.callbacks = None\n # to keep track of which device the nn.Module is on\n self.device_indicator_param = nn.Parameter(torch.empty(0))\n\n @property\n def device(self):\n \"\"\"Returns the device that the model is on.\"\"\"\n return self.device_indicator_param.device\n\n def get_training_callbacks( # pylint:disable=no-self-use\n self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument\n ) -> List[TrainingCallback]:\n \"\"\"Returns a list of callbacks that run functions at the specified training iterations.\"\"\"\n return []\n\n def populate_modules(self):\n \"\"\"Set the necessary modules to get the network working.\"\"\"\n # default instantiates optional modules that are common among many networks\n # NOTE: call `super().populate_modules()` in subclasses\n\n if self.config.enable_collider:\n self.collider = NearFarCollider(\n near_plane=self.config.collider_params[\"near_plane\"], far_plane=self.config.collider_params[\"far_plane\"]\n )\n\n @abstractmethod\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n \"\"\"Obtain the parameter groups for the optimizers\n\n Returns:\n Mapping of different parameter groups\n \"\"\"\n\n @abstractmethod\n def get_outputs(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in a Ray Bundle and returns a dictionary of outputs.\n\n Args:\n ray_bundle: Input bundle of rays. This raybundle should have all the\n needed information to compute the outputs.\n\n Returns:\n Outputs of model. (ie. rendered colors)\n \"\"\"\n\n def forward(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Run forward starting with a ray bundle. 
This outputs different things depending on the configuration\n of the model and whether or not the batch is provided (whether or not we are training basically)\n\n Args:\n ray_bundle: containing all the information needed to render that ray latents included\n \"\"\"\n\n if self.collider is not None:\n ray_bundle = self.collider(ray_bundle)\n\n return self.get_outputs(ray_bundle)\n\n def get_metrics_dict(self, outputs, batch) -> Dict[str, torch.Tensor]:\n \"\"\"Compute and returns metrics.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n \"\"\"\n # pylint: disable=unused-argument\n # pylint: disable=no-self-use\n return {}\n \n\n @abstractmethod\n def get_loss_dict(self, outputs, batch, metrics_dict=None) -> Dict[str, torch.Tensor]:\n \"\"\"Computes and returns the losses dict.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n metrics_dict: dictionary of metrics, some of which we can use for loss\n \"\"\"\n \n def n_parameters(self):\n return -1.0\n\n @torch.no_grad()\n def get_outputs_for_camera_ray_bundle(self, camera_ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in camera parameters and computes the output of the model.\n\n Args:\n camera_ray_bundle: ray bundle to calculate outputs over\n \"\"\"\n num_rays_per_chunk = self.config.eval_num_rays_per_chunk\n image_height, image_width = camera_ray_bundle.origins.shape[:2]\n num_rays = len(camera_ray_bundle)\n outputs_lists = defaultdict(list)\n for i in range(0, num_rays, num_rays_per_chunk):\n start_idx = i\n end_idx = i + num_rays_per_chunk\n ray_bundle = camera_ray_bundle.get_row_major_sliced_ray_bundle(start_idx, end_idx)\n outputs = self.forward(ray_bundle=ray_bundle)\n for output_name, output in outputs.items(): # type: ignore\n outputs_lists[output_name].append(output)\n outputs = {}\n for output_name, outputs_list in outputs_lists.items():\n if not torch.is_tensor(outputs_list[0]):\n # TODO: handle lists of tensors as well\n continue\n outputs[output_name] = torch.cat(outputs_list).view(image_height, image_width, -1) # type: ignore\n return outputs\n\n @abstractmethod\n def get_image_metrics_and_images(\n self, outputs: Dict[str, torch.Tensor], batch: Dict[str, torch.Tensor]\n ) -> Tuple[Dict[str, float], Dict[str, torch.Tensor]]:\n \"\"\"Writes the test image outputs.\n TODO: This shouldn't return a loss\n\n Args:\n image_idx: Index of the image.\n step: Current step.\n batch: Batch of data.\n outputs: Outputs of the model.\n\n Returns:\n A dictionary of metrics.\n \"\"\"\n\n def load_model(self, loaded_state: Dict[str, Any]) -> None:\n \"\"\"Load the checkpoint from the given path\n\n Args:\n loaded_state: dictionary of pre-trained model states\n \"\"\"\n state = {key.replace(\"module.\", \"\"): value for key, value in loaded_state[\"model\"].items()}\n self.load_state_dict(state) # type: ignore\n \n def customized_save(self, step: int, checkpoint_dir) -> None:\n \"\"\"Call the model's customized save function.\n\n Args:\n step: Current step.\n checkpoint_dir: directory of checkpoint\n \"\"\"\n pass\n\n def customized_load(self, load_step: int, checkpoint_dir) -> None:\n \"\"\"Call the model's customized load function.\n\n Args:\n checkpoint_dir: directory of checkpoint\n \"\"\"\n pass" }, { "identifier": "colormaps", "path": "nerfstudio/utils/colormaps.py", "snippet": "def apply_colormap(image: TensorType[\"bs\":..., 1], cmap=\"viridis\") -> TensorType[\"bs\":..., \"rgb\":3]:\ndef 
apply_depth_colormap(\n depth: TensorType[\"bs\":..., 1],\n accumulation: Optional[TensorType[\"bs\":..., 1]] = None,\n near_plane: Optional[float] = None,\n far_plane: Optional[float] = None,\n cmap=\"turbo\",\n) -> TensorType[\"bs\":..., \"rgb\":3]:\ndef apply_boolean_colormap(\n image: TensorType[\"bs\":..., 1, bool],\n true_color: TensorType[\"bs\":..., \"rgb\":3] = colors.WHITE,\n false_color: TensorType[\"bs\":..., \"rgb\":3] = colors.BLACK,\n) -> TensorType[\"bs\":..., \"rgb\":3]:" }, { "identifier": "profiler", "path": "nerfstudio/utils/profiler.py", "snippet": "CONSOLE = Console(width=120)\nPROFILER = []\ndef time_function(func: Callable) -> Callable:\n def wrapper(*args, **kwargs):\ndef flush_profiler(config: cfg.LoggingConfig):\ndef setup_profiler(config: cfg.LoggingConfig):\n def __init__(self, config: cfg.LoggingConfig):\n def update_time(self, func_name: str, start_time: float, end_time: float):\n def print_profile(self):\nclass Profiler:" }, { "identifier": "writer", "path": "nerfstudio/utils/writer.py", "snippet": "CONSOLE = Console(width=120)\nEVENT_WRITERS = []\nEVENT_STORAGE = []\nGLOBAL_BUFFER = {}\n ITER_TRAIN_TIME = \"Train Iter (time)\"\n TOTAL_TRAIN_TIME = \"Train Total (time)\"\n ITER_VIS_TIME = \"Viewer Rendering (time)\"\n ETA = \"ETA (time)\"\n TRAIN_RAYS_PER_SEC = \"Train Rays / Sec\"\n TEST_RAYS_PER_SEC = \"Test Rays / Sec\"\n VIS_RAYS_PER_SEC = \"Vis Rays / Sec\"\n CURR_TEST_PSNR = \"Test PSNR\"\n IMAGE = \"write_image\"\n PLOTLY = \"write_plotly\"\n SCALAR = \"write_scalar\"\n DICT = \"write_scalar_dict\"\n CONFIG = \"write_config\"\nclass EventName(enum.Enum):\nclass EventType(enum.Enum):\nclass Writer:\nclass TimeWriter:\nclass WandbWriter(Writer):\nclass TensorboardWriter(Writer):\nclass LocalWriter:\ndef put_image(name, image: TensorType[\"H\", \"W\", \"C\"], step: int):\ndef put_plotly(name: str, figure: Any, step: int = 0):\ndef put_scalar(name: str, scalar: Any, step: int):\ndef put_dict(name: str, scalar_dict: Dict[str, Any], step: int):\ndef put_config(name: str, config_dict: Dict[str, Any], step: int):\ndef put_time(name: str, duration: float, step: int, avg_over_steps: bool = True, update_eta: bool = False):\ndef write_out_storage():\ndef setup_local_writer(config: cfg.LoggingConfig, max_iter: int, banner_messages: Optional[List[str]] = None) -> None:\ndef setup_event_writer(config: cfg.Config, log_dir: Path) -> None:\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_plotly(self, name: str, figure: Any, step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_scalar_dict(self, name: str, scalar_dict: Dict[str, Any], step: int) -> None:\n def __init__(self, writer, name, step=None, write=True):\n def __enter__(self):\n def __exit__(self, *args):\n def __init__(self, log_dir: Path, experiment_name: str):\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_plotly(self, name: str, figure: Any, step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int):\n def __init__(self, log_dir: Path):\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_plotly(self, name: str, figure: Any, step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def 
write_config(self, name: str, config_dict: Dict[str, Any], step: int): # pylint: disable=unused-argument\ndef _cursorup(x: int):\ndef _format_time(seconds):\n def __init__(self, config: cfg.LocalWriterConfig, banner_messages: Optional[List[str]] = None):\n def write_stats_log(self, step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int):\n def _consolidate_events(self):\n def _update_header(self, latest_map, new_key):\n def _print_stats(self, latest_map, padding=\" \"):" }, { "identifier": "check_main_thread", "path": "nerfstudio/utils/decorators.py", "snippet": "def check_main_thread(func: Callable) -> Callable:\n \"\"\"Decorator: check if you are on main thread\"\"\"\n\n def wrapper(*args, **kwargs):\n ret = None\n if comms.is_main_process():\n ret = func(*args, **kwargs)\n return ret\n\n return wrapper" }, { "identifier": "decorate_all", "path": "nerfstudio/utils/decorators.py", "snippet": "def decorate_all(decorators: List[Callable]) -> Callable:\n \"\"\"A decorator to decorate all member functions of a class\n\n Args:\n decorators: list of decorators to add to all functions in the class\n \"\"\"\n\n def decorate(cls):\n for attr in cls.__dict__:\n if callable(getattr(cls, attr)) and attr != \"__init__\":\n for decorator in decorators:\n setattr(cls, attr, decorator(getattr(cls, attr)))\n return cls\n\n return decorate" }, { "identifier": "BasicImages", "path": "nerfstudio/utils/images.py", "snippet": "class BasicImages:\n \"\"\"This is a very primitive struct for holding images, especially for when these images\n are of different heights / widths.\n\n The purpose of this is to have a special struct wrapping around a list so that the\n nerfstudio_collate fn and other parts of the code recognise this as a struct to leave alone\n instead of reshaping or concatenating into a single tensor (since this will likely be used\n for cases where we have images of different sizes and shapes).\n\n This only has one batch dimension and will likely be replaced down the line with some\n TensorDataclass alternative that supports arbitrary batches.\n \"\"\"\n\n def __init__(self, images: List):\n assert isinstance(images, List)\n assert not images or isinstance(\n images[0], torch.Tensor\n ), f\"Input should be a list of tensors, not {type(images[0]) if isinstance(images, List) else type(images)}\"\n self.images = images\n\n def to(self, device):\n \"\"\"Move the images to the given device.\"\"\"\n assert isinstance(device, torch.device)\n return BasicImages([image.to(device) for image in self.images])" }, { "identifier": "load_from_json", "path": "nerfstudio/utils/io.py", "snippet": "def load_from_json(filename: Path):\n \"\"\"Load a dictionary from a JSON filename.\n\n Args:\n filename: The filename to load from.\n \"\"\"\n assert filename.suffix == \".json\"\n with open(filename, encoding=\"UTF-8\") as file:\n return json.load(file)" }, { "identifier": "write_to_json", "path": "nerfstudio/utils/io.py", "snippet": "def write_to_json(filename: Path, content: dict):\n \"\"\"Write data to a JSON file.\n\n Args:\n filename: The filename to write to.\n content: The dictionary data to write.\n \"\"\"\n assert filename.suffix == \".json\", \"Filename must have .json extension but got {}\".format(filename)\n with open(filename, \"w\", encoding=\"UTF-8\") as file:\n json.dump(content, file)" }, { "identifier": "GLOBAL_BUFFER", "path": "nerfstudio/utils/writer.py", "snippet": "GLOBAL_BUFFER = {}" }, { "identifier": "EventName", "path": "nerfstudio/utils/writer.py", 
"snippet": "class EventName(enum.Enum):\n \"\"\"Names of possible events that can be logged via Local Writer for convenience.\n see config/logging/default_logging.yaml\"\"\"\n\n ITER_TRAIN_TIME = \"Train Iter (time)\"\n TOTAL_TRAIN_TIME = \"Train Total (time)\"\n ITER_VIS_TIME = \"Viewer Rendering (time)\"\n ETA = \"ETA (time)\"\n TRAIN_RAYS_PER_SEC = \"Train Rays / Sec\"\n TEST_RAYS_PER_SEC = \"Test Rays / Sec\"\n VIS_RAYS_PER_SEC = \"Vis Rays / Sec\"\n CURR_TEST_PSNR = \"Test PSNR\"" }, { "identifier": "TimeWriter", "path": "nerfstudio/utils/writer.py", "snippet": "class TimeWriter:\n \"\"\"Timer context manager that calculates duration around wrapped functions\"\"\"\n\n def __init__(self, writer, name, step=None, write=True):\n self.writer = writer\n self.name = name\n self.step = step\n self.write = write\n\n self.start: float = 0.0\n self.duration: float = 0.0\n\n def __enter__(self):\n torch.cuda.synchronize()\n self.start = time()\n return self\n\n def __exit__(self, *args):\n torch.cuda.synchronize()\n self.duration = time() - self.start\n update_step = self.step is not None\n if self.write:\n self.writer.put_time(\n name=self.name,\n duration=self.duration,\n step=self.step if update_step else GLOBAL_BUFFER[\"max_iter\"],\n avg_over_steps=update_step,\n update_eta=self.name == EventName.ITER_TRAIN_TIME,\n )" }, { "identifier": "run_viewer_bridge_server_as_subprocess", "path": "nerfstudio/viewer/server/subprocess.py", "snippet": "def run_viewer_bridge_server_as_subprocess(\n websocket_port: int,\n zmq_port: Optional[int] = None,\n ip_address: str = \"127.0.0.1\",\n log_filename: Union[str, None] = None,\n):\n \"\"\"Runs the viewer bridge server as a subprocess.\n\n Args:\n zmq_port: Port to use for the ZMQ server.\n websocket_port: Port to use for the websocket server.\n ip_address: host to connect to\n log_filename: Filename to use for the log file. If None, no log file is created.\n\n Returns:\n None\n \"\"\"\n args = [sys.executable, \"-u\", \"-m\", server.__name__]\n\n # find an available port for zmq\n if zmq_port is None:\n sock = socket.socket()\n sock.bind((\"\", 0))\n zmq_port = sock.getsockname()[1]\n string = f\"Using ZMQ port: {zmq_port}\"\n CONSOLE.print(f\"[bold yellow]{string}\")\n\n args.append(\"--zmq-port\")\n args.append(str(zmq_port))\n args.append(\"--websocket-port\")\n args.append(str(websocket_port))\n args.append(\"--ip-address\")\n args.append(str(ip_address))\n # supress output if no log filename is specified\n logfile = open( # pylint: disable=consider-using-with\n log_filename if log_filename else os.devnull, \"w\", encoding=\"utf8\"\n )\n process = subprocess.Popen( # pylint: disable=consider-using-with\n args, stdout=logfile, stderr=logfile, start_new_session=True\n )\n\n def cleanup(process):\n process.kill()\n process.wait()\n\n def poll_process():\n \"\"\"\n Continually check to see if the viewer bridge server process is still running and has not failed.\n If it fails, alert the user and exit the entire program.\n \"\"\"\n while process.poll() is None:\n time.sleep(0.5)\n string = f\"\\nThe viewer bridge server subprocess failed. Please check the log file {log_filename}.\\n\"\n string += (\n \"You likely have to modify --viewer.zmq-port and/or --viewer.websocket-port in the \"\n \"config to avoid conflicting ports.\\n\"\n )\n string += \"Try modifying --viewer.websocket-port 7007\\n\"\n CONSOLE.print(f\"[bold red]{string}\")\n cleanup(process)\n # This exists the entire program. 
sys.exit() will only kill the thread that this runs in.\n os.kill(os.getpid(), signal.SIGKILL)\n\n # continually check to see if the process stopped\n t1 = threading.Thread(target=poll_process)\n t1.daemon = True\n t1.start()\n atexit.register(cleanup, process)\n return zmq_port" }, { "identifier": "get_intrinsics_matrix_and_camera_to_world_h", "path": "nerfstudio/viewer/server/utils.py", "snippet": "def get_intrinsics_matrix_and_camera_to_world_h(\n camera_object: Dict[str, Any], image_height: int\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Returns the camera intrinsics matrix and the camera to world homogeneous matrix.\n\n Args:\n camera_object: a Camera object.\n image_size: the size of the image (height, width)\n \"\"\"\n # intrinsics\n fov = camera_object[\"fov\"]\n aspect = camera_object[\"aspect\"]\n image_width = aspect * image_height\n pp_w = image_width / 2.0\n pp_h = image_height / 2.0\n focal_length = three_js_perspective_camera_focal_length(fov, image_height)\n intrinsics_matrix = torch.tensor([[focal_length, 0, pp_w], [0, focal_length, pp_h], [0, 0, 1]]).float()\n\n # extrinsics\n camera_to_world_h = torch.tensor(get_chunks(camera_object[\"matrix\"], size_of_chunk=4)).T.float()\n camera_to_world_h = torch.stack(\n [\n camera_to_world_h[0, :],\n camera_to_world_h[2, :],\n camera_to_world_h[1, :],\n camera_to_world_h[3, :],\n ],\n dim=0,\n )\n\n return intrinsics_matrix, camera_to_world_h" }, { "identifier": "Viewer", "path": "nerfstudio/viewer/server/visualizer.py", "snippet": "class Viewer:\n \"\"\"Viewer class for connecting to the bridge server.\n\n Args:\n zmq_port: Where to connect with ZMQ.\n window: An already existing ViewerWindow.\n ip_address: The ip address of the bridge server.\n \"\"\"\n\n def __init__(\n self, zmq_port: Optional[int] = None, window: Optional[ViewerWindow] = None, ip_address: str = \"127.0.0.1\"\n ):\n if zmq_port is None and window is None:\n raise ValueError(\"Must specify either zmq_port or window.\")\n if window is None:\n self.window = ViewerWindow(zmq_port=zmq_port, ip_address=ip_address)\n else:\n self.window = window\n self.path = Path(())\n\n @staticmethod\n def view_into(window: ViewerWindow, path: Path):\n \"\"\"Returns a new Viewer but keeping the same ViewerWindow.\"\"\"\n vis = Viewer(window=window)\n vis.path = path\n return vis\n\n def __getitem__(self, path):\n return Viewer.view_into(self.window, self.path.append(path))\n\n def __repr__(self):\n return f\"<Viewer using: {self.window} at path: {self.path}>\"\n\n def write(self, data: Union[Dict, str, None] = None):\n \"\"\"Write data.\"\"\"\n path = self.path.lower()\n return self.window.send({\"type\": \"write\", \"path\": path, \"data\": data})\n\n def read(self):\n \"\"\"Read data.\"\"\"\n path = self.path.lower()\n return self.window.send({\"type\": \"read\", \"path\": path})\n\n def delete(self):\n \"\"\"Delete data.\"\"\"\n return self.write(data=None)" } ]
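The `_generate_rays_from_coords` snippet in the context above reduces, for perspective cameras, to mapping each pixel (x, y) to a camera-plane direction ((x - cx) / fx, -(y - cy) / fy, -1), rotating it by the camera-to-world rotation, and normalizing. Below is a minimal sketch of just that math, independent of the `Cameras` class; the intrinsics, image size, and pose are made-up toy values, and distortion, the dx/dy pixel-area terms, and the fisheye/equirectangular branches are deliberately omitted.

import torch

# Toy intrinsics and pose (assumptions, not values from this record).
fx, fy, cx, cy = 100.0, 100.0, 64.0, 48.0
c2w = torch.eye(4)[:3, :]  # (3, 4) camera-to-world, identity pose here

# Pixel centers stored as (y, x), as in Cameras.get_image_coords().
ys, xs = torch.meshgrid(torch.arange(96), torch.arange(128), indexing="ij")
y = ys.float() + 0.5
x = xs.float() + 0.5

# Camera-plane directions, matching coord = ((x - cx) / fx, -(y - cy) / fy) with z = -1.
directions = torch.stack([(x - cx) / fx, -(y - cy) / fy, -torch.ones_like(x)], dim=-1)  # (H, W, 3)

# Rotate into world space the same way the snippet does, then normalize.
rotation = c2w[:3, :3]
directions = torch.sum(directions[..., None, :] * rotation, dim=-1)  # (H, W, 3)
directions = torch.nn.functional.normalize(directions, dim=-1)
origins = c2w[:3, 3].expand_as(directions)  # every ray starts at the camera center

print(origins.shape, directions.shape)  # torch.Size([96, 128, 3]) twice

The real implementation additionally evaluates directions offset by one pixel in x and in y to estimate dx/dy for the per-ray pixel_area, which this sketch skips.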
import base64
import enum
import os
import sys
import threading
import time
import warnings

import cv2
import numpy as np
import torch
from pathlib import Path
from typing import Any, Dict, Optional, Tuple
from cryptography.utils import CryptographyDeprecationWarning
from rich.console import Console

from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.configs import base_config as cfg
from nerfstudio.data.datasets.base_dataset import InputDataset
from nerfstudio.models.base_model import Model
from nerfstudio.utils import colormaps, profiler, writer
from nerfstudio.utils.decorators import check_main_thread, decorate_all
from nerfstudio.utils.images import BasicImages
from nerfstudio.utils.io import load_from_json, write_to_json
from nerfstudio.utils.writer import GLOBAL_BUFFER, EventName, TimeWriter
from nerfstudio.viewer.server.subprocess import run_viewer_bridge_server_as_subprocess
from nerfstudio.viewer.server.utils import get_intrinsics_matrix_and_camera_to_world_h
from nerfstudio.viewer.server.visualizer import Viewer
21,005
Args: config: viewer setup configuration """ def __init__(self, config: cfg.ViewerConfig, log_filename: Path): self.config = config self.vis = None self.viewer_url = None self.log_filename = log_filename if self.config.launch_bridge_server: # start the viewer bridge server assert self.config.websocket_port is not None self.log_filename.parent.mkdir(exist_ok=True) zmq_port = run_viewer_bridge_server_as_subprocess( self.config.websocket_port, zmq_port=self.config.zmq_port, ip_address=self.config.ip_address, log_filename=str(self.log_filename), ) # TODO(ethan): log the output of the viewer bridge server in a file where the training logs go CONSOLE.line() version = get_viewer_version() websocket_url = f"ws://localhost:{self.config.websocket_port}" self.viewer_url = f"https://viewer.nerf.studio/versions/{version}/?websocket_url={websocket_url}" CONSOLE.rule(characters="=") CONSOLE.print(f"[Public] Open the viewer at {self.viewer_url}") CONSOLE.rule(characters="=") CONSOLE.line() self.vis = Viewer(zmq_port=zmq_port, ip_address=self.config.ip_address) else: assert self.config.zmq_port is not None self.vis = Viewer(zmq_port=self.config.zmq_port, ip_address=self.config.ip_address) # viewer specific variables self.prev_camera_matrix = None self.prev_output_type = OutputTypes.INIT self.prev_colormap_type = ColormapTypes.INIT self.prev_moving = False self.output_type_changed = True self.max_resolution = 1000 self.check_interrupt_vis = False self.check_done_render = True self.step = 0 self.static_fps = 1 self.moving_fps = 24 self.camera_moving = False self.prev_camera_timestamp = 0 self.probe_config = None self.output_list = None def _pick_drawn_image_idxs(self, total_num: int) -> list[int]: """Determine indicies of images to display in viewer. Args: total_num: total number of training images. Returns: List of indices from [0, total_num-1]. """ if self.config.max_num_display_images < 0: num_display_images = total_num else: num_display_images = min(self.config.max_num_display_images, total_num) # draw indices, roughly evenly spaced return np.linspace(0, total_num - 1, num_display_images, dtype=np.int32).tolist() def init_scene(self, dataset: InputDataset, start_train=True) -> None: """Draw some images and the scene aabb in the viewer. 
Args: dataset: dataset to render in the scene start_train: whether to start train when viewer init; if False, only displays dataset until resume train is toggled """ # set the config base dir self.vis["renderingState/config_base_dir"].write(str(self.log_filename.parents[0])) # clear the current scene self.vis["sceneState/sceneBox"].delete() self.vis["sceneState/cameras"].delete() # draw the training cameras and images image_indices = self._pick_drawn_image_idxs(len(dataset)) for idx in image_indices: image = dataset[idx]["image"] if isinstance(image, BasicImages): bgr = image.images[0][..., [2, 1, 0]] else: bgr = image[..., [2, 1, 0]] camera_json = dataset.cameras.to_json(camera_idx=idx, image=bgr, max_size=100) self.vis[f"sceneState/cameras/{idx:06d}"].write(camera_json) # draw the scene box (i.e., the bounding box) json_ = dataset.scene_box.to_json() self.vis["sceneState/sceneBox"].write(json_) # set the initial state whether to train or not self.vis["renderingState/isTraining"].write(start_train) # self.vis["renderingState/render_time"].write(str(0)) self.probe_config = dataset.cameras.probe_config # set the properties of the camera # self.vis["renderingState/camera"].write(json_) # set the main camera intrinsics to one from the dataset # K = camera.get_intrinsics_matrix() # set_persp_intrinsics_matrix(self.vis, K.double().numpy()) def _check_camera_path_payload(self, trainer, step: int): """Check to see if the camera path export button was pressed.""" # check if we should interrupt from a button press? camera_path_payload = self.vis["camera_path_payload"].read() if camera_path_payload: # save a model checkpoint trainer.save_checkpoint(step) # write to json file camera_path_filename = camera_path_payload["camera_path_filename"] + '.json' camera_path = camera_path_payload["camera_path"]
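The cropped code above drives the web viewer entirely through the path-based `Viewer` wrapper documented in the context list (e.g. `self.vis["renderingState/isTraining"].write(start_train)`). A hedged usage sketch of that wrapper follows; it assumes a bridge server started via `run_viewer_bridge_server_as_subprocess` is already listening on the chosen ZMQ port (6000 here is an arbitrary placeholder), otherwise the reads and writes will block.

from nerfstudio.viewer.server.visualizer import Viewer

# Assumes the viewer bridge server is already running and bound to this ZMQ port.
vis = Viewer(zmq_port=6000)

# __getitem__ appends to the path; write/read send {"type": ..., "path": ..., "data": ...}.
vis["renderingState/isTraining"].write(True)   # same call init_scene() makes
camera = vis["renderingState/camera"].read()   # stored camera state, or None if unset
vis["sceneState/sceneBox"].delete()            # delete is implemented as write(data=None)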
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Code to interface with the `vis/` (the JS viewer). """ from __future__ import annotations warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning) CONSOLE = Console(width=120) def get_viewer_version() -> str: """Get the version of the viewer.""" json_filename = os.path.join(os.path.dirname(__file__), "../app/package.json") version = load_from_json(Path(json_filename))["version"] return version @check_main_thread def setup_viewer(config: cfg.ViewerConfig, log_filename: Path): """Sets up the viewer if enabled Args: config: the configuration to instantiate viewer """ viewer_state = ViewerState(config, log_filename=log_filename) banner_messages = [f"Viewer at: {viewer_state.viewer_url}"] return viewer_state, banner_messages class OutputTypes(str, enum.Enum): """Noncomprehsnive list of output render types""" INIT = "init" RGB = "rgb" RGB_FINE = "rgb_fine" ACCUMULATION = "accumulation" ACCUMULATION_FINE = "accumulation_fine" class ColormapTypes(str, enum.Enum): """Noncomprehsnive list of colormap render types""" INIT = "init" DEFAULT = "default" TURBO = "turbo" DEPTH = "depth" SEMANTIC = "semantic" BOOLEAN = "boolean" class IOChangeException(Exception): """Basic camera exception to interrupt viewer""" class SetTrace: """Basic trace function""" def __init__(self, func): self.func = func def __enter__(self): sys.settrace(self.func) return self def __exit__(self, ext_type, exc_value, traceback): sys.settrace(None) class RenderThread(threading.Thread): """Thread that does all the rendering calls while listening for interrupts Args: state: current viewer state object graph: current checkpoint of model camera_ray_bundle: input rays to pass through the graph to render out """ def __init__(self, state: "ViewerState", graph: Model, camera_ray_bundle: RayBundle): threading.Thread.__init__(self) self.state = state self.graph = graph self.camera_ray_bundle = camera_ray_bundle self.exc = None self.vis_outputs = None def run(self): """run function that renders out images given the current graph and ray bundles. Interlaced with a trace function that checks to see if any I/O changes were registered. Exits and continues program if IOChangeException thrown. 
""" outputs = None try: with SetTrace(self.state.check_interrupt): with torch.no_grad(): outputs = self.graph.get_outputs_for_camera_ray_bundle(self.camera_ray_bundle) except Exception as e: # pylint: disable=broad-except self.exc = e if outputs: self.vis_outputs = outputs self.state.check_done_render = True self.state.check_interrupt_vis = False def join(self, timeout=None): threading.Thread.join(self) if self.exc: raise self.exc class CheckThread(threading.Thread): """Thread the constantly checks for io changes and sets a flag indicating interrupt Args: state: current viewer state object """ def __init__(self, state): threading.Thread.__init__(self) self.state = state def run(self): """Run function that checks to see if any of the existing state has changed (e.g. camera pose/output type/resolutions). Sets the viewer state flag to true to signal to render thread that an interrupt was registered. """ self.state.check_done_render = False while not self.state.check_done_render: # check camera data = self.state.vis["renderingState/camera"].read() if data is not None: camera_object = data["object"] if self.state.prev_camera_matrix is None or ( not np.allclose(camera_object["matrix"], self.state.prev_camera_matrix) and not self.state.prev_moving ): self.state.check_interrupt_vis = True self.state.prev_moving = True return self.state.prev_moving = False # check output type output_type = self.state.vis["renderingState/output_choice"].read() if output_type is None: output_type = OutputTypes.INIT if self.state.prev_output_type != output_type: self.state.check_interrupt_vis = True return # check colormap type colormap_type = self.state.vis["renderingState/colormap_choice"].read() if colormap_type is None: colormap_type = ColormapTypes.INIT if self.state.prev_colormap_type != colormap_type: self.state.check_interrupt_vis = True return # check max render max_resolution = self.state.vis["renderingState/maxResolution"].read() if max_resolution is not None: if self.state.max_resolution != max_resolution: self.state.check_interrupt_vis = True return @decorate_all([check_main_thread]) class ViewerState: """Class to hold state for viewer variables Args: config: viewer setup configuration """ def __init__(self, config: cfg.ViewerConfig, log_filename: Path): self.config = config self.vis = None self.viewer_url = None self.log_filename = log_filename if self.config.launch_bridge_server: # start the viewer bridge server assert self.config.websocket_port is not None self.log_filename.parent.mkdir(exist_ok=True) zmq_port = run_viewer_bridge_server_as_subprocess( self.config.websocket_port, zmq_port=self.config.zmq_port, ip_address=self.config.ip_address, log_filename=str(self.log_filename), ) # TODO(ethan): log the output of the viewer bridge server in a file where the training logs go CONSOLE.line() version = get_viewer_version() websocket_url = f"ws://localhost:{self.config.websocket_port}" self.viewer_url = f"https://viewer.nerf.studio/versions/{version}/?websocket_url={websocket_url}" CONSOLE.rule(characters="=") CONSOLE.print(f"[Public] Open the viewer at {self.viewer_url}") CONSOLE.rule(characters="=") CONSOLE.line() self.vis = Viewer(zmq_port=zmq_port, ip_address=self.config.ip_address) else: assert self.config.zmq_port is not None self.vis = Viewer(zmq_port=self.config.zmq_port, ip_address=self.config.ip_address) # viewer specific variables self.prev_camera_matrix = None self.prev_output_type = OutputTypes.INIT self.prev_colormap_type = ColormapTypes.INIT self.prev_moving = False self.output_type_changed 
= True self.max_resolution = 1000 self.check_interrupt_vis = False self.check_done_render = True self.step = 0 self.static_fps = 1 self.moving_fps = 24 self.camera_moving = False self.prev_camera_timestamp = 0 self.probe_config = None self.output_list = None def _pick_drawn_image_idxs(self, total_num: int) -> list[int]: """Determine indicies of images to display in viewer. Args: total_num: total number of training images. Returns: List of indices from [0, total_num-1]. """ if self.config.max_num_display_images < 0: num_display_images = total_num else: num_display_images = min(self.config.max_num_display_images, total_num) # draw indices, roughly evenly spaced return np.linspace(0, total_num - 1, num_display_images, dtype=np.int32).tolist() def init_scene(self, dataset: InputDataset, start_train=True) -> None: """Draw some images and the scene aabb in the viewer. Args: dataset: dataset to render in the scene start_train: whether to start train when viewer init; if False, only displays dataset until resume train is toggled """ # set the config base dir self.vis["renderingState/config_base_dir"].write(str(self.log_filename.parents[0])) # clear the current scene self.vis["sceneState/sceneBox"].delete() self.vis["sceneState/cameras"].delete() # draw the training cameras and images image_indices = self._pick_drawn_image_idxs(len(dataset)) for idx in image_indices: image = dataset[idx]["image"] if isinstance(image, BasicImages): bgr = image.images[0][..., [2, 1, 0]] else: bgr = image[..., [2, 1, 0]] camera_json = dataset.cameras.to_json(camera_idx=idx, image=bgr, max_size=100) self.vis[f"sceneState/cameras/{idx:06d}"].write(camera_json) # draw the scene box (i.e., the bounding box) json_ = dataset.scene_box.to_json() self.vis["sceneState/sceneBox"].write(json_) # set the initial state whether to train or not self.vis["renderingState/isTraining"].write(start_train) # self.vis["renderingState/render_time"].write(str(0)) self.probe_config = dataset.cameras.probe_config # set the properties of the camera # self.vis["renderingState/camera"].write(json_) # set the main camera intrinsics to one from the dataset # K = camera.get_intrinsics_matrix() # set_persp_intrinsics_matrix(self.vis, K.double().numpy()) def _check_camera_path_payload(self, trainer, step: int): """Check to see if the camera path export button was pressed.""" # check if we should interrupt from a button press? camera_path_payload = self.vis["camera_path_payload"].read() if camera_path_payload: # save a model checkpoint trainer.save_checkpoint(step) # write to json file camera_path_filename = camera_path_payload["camera_path_filename"] + '.json' camera_path = camera_path_payload["camera_path"]
write_to_json(Path(camera_path_filename), camera_path)
12
2023-12-15 20:07:22+00:00
24k
amazon-science/c2f-seg
data/dataloader_transformer.py
[ { "identifier": "FishBowl", "path": "data/dataloader_Fishbowl.py", "snippet": "class FishBowl(object):\n def __init__(self, config, mode, subtest=None):\n self.datatype = mode\n data_dir = config.root_path\n\n self.img_path = os.path.join(data_dir, self.datatype+\"_data\", self.datatype+\"_frames\")\n self.mode = mode\n self.dtype = torch.float32\n self.test_set = subtest\n \n self.data_summary = pickle.load(open(os.path.join(data_dir, self.datatype+\"_data\", self.datatype+\"_data.pkl\"), \"rb\"))\n self.obj_lists = list(self.data_summary.keys())\n self.device = \"cpu\"\n\n self.seq_len = 32 if self.mode == \"test\" else config.train_seq_len\n\n self.cur_vid = None\n self.video_frames = None\n self.patch_h = config.patch_H\n self.patch_w = config.patch_W\n self.enlarge_coef = config.enlarge_coef\n\n def decode2binarymask(self, masks):\n mask = mask_utils.decode(masks)\n binary_masks = mask.astype('bool') # (Image_W,Image_H,128)\n binary_masks = binary_masks.transpose(2,0,1) #(128, Image_W, Image_H)\n return binary_masks\n\n def __len__(self):\n return len(self.obj_lists)\n\n def __getitem__(self, idx):\n v_id, obj_id = self.obj_lists[idx].split(\"_\")\n if v_id != self.cur_vid:\n self.cur_vid = v_id\n fm_crop = []\n fm_no_crop = []\n vm_crop = []\n vm_no_crop = []\n img_crop = []\n \n obj_position = []\n\n counts = []\n loss_mask_weight = []\n\n # for evaluation \n video_ids = []\n object_ids = []\n frame_ids = []\n\n obj_dict = self.data_summary[self.obj_lists[idx]]\n timesteps = list(obj_dict.keys())\n assert np.all(np.diff(sorted(timesteps))==1)\n start_t, end_t = min(timesteps), max(timesteps)\n # print(start_t, end_t)\n if self.mode != \"test\" and end_t - start_t > self.seq_len - 1:\n start_t = np.random.randint(start_t, end_t-(self.seq_len-2))\n end_t = start_t + self.seq_len - 1\n\n if self.mode == \"test\":\n if start_t + self.seq_len-1<=end_t:\n end_t = start_t + self.seq_len-1\n\n for t_step in range(start_t, end_t):\n image_path = os.path.join(self.img_path, v_id, str(t_step).zfill(5)+'.png')\n img = cv2.imread(image_path)[:,:,::-1]\n # get visible mask and full mask\n vm = self.decode2binarymask(obj_dict[t_step][\"VM\"])[0]\n fm = self.decode2binarymask(obj_dict[t_step][\"FM\"])[0] # 320, 480\n vx_min, vx_max, vy_min, vy_max = obj_dict[t_step][\"VM_bx\"]\n x_center = (vx_min + vx_max) // 2\n y_center = (vy_min + vy_max) // 2\n x_len = int((vx_max - vx_min) * self.enlarge_coef)\n y_len = int((vy_max - vy_min) * self.enlarge_coef)\n vx_min = max(0, x_center - x_len // 2)\n vx_max = min(320, x_center + x_len // 2)\n vy_min = max(0, y_center - y_len // 2)\n vy_max = min(480, y_center + y_len // 2)\n\n obj_position.append([vx_min, vx_max, vy_min, vy_max])\n vm_crop.append(vm[vx_min:vx_max+1, vy_min:vy_max+1])\n fm_crop.append(fm[vx_min:vx_max+1, vy_min:vy_max+1])\n img_crop.append(img[vx_min:vx_max+1, vy_min:vy_max+1])\n\n vm_no_crop.append(vm)\n fm_no_crop.append(fm)\n # get loss mask\n loss_mask_weight.append(self.decode2binarymask(obj_dict[t_step][\"loss_mask_weight\"])[0])\n\n # for evaluation\n video_ids.append(int(v_id))\n object_ids.append(int(obj_id))\n frame_ids.append(t_step)\n counts.append(1)\n \n if True:\n num_pad = self.seq_len - (end_t - start_t)\n for _ in range(num_pad):\n obj_position.append(copy.deepcopy(obj_position[-1]))\n\n fm_crop.append(copy.deepcopy(fm_crop[-1]))\n fm_no_crop.append(copy.deepcopy(fm_no_crop[-1]))\n vm_crop.append(copy.deepcopy(vm_crop[-1]))\n vm_no_crop.append(copy.deepcopy(vm_no_crop[-1]))\n 
img_crop.append(copy.deepcopy(img_crop[-1]))\n\n loss_mask_weight.append(copy.deepcopy(loss_mask_weight[-1]))\n \n video_ids.append(video_ids[-1])\n object_ids.append(object_ids[-1])\n frame_ids.append(frame_ids[-1] + 1)\n counts.append(0)\n \n vm_crop, vm_crop_gt, fm_crop, img_crop, vm_pad, vm_scale = self.crop_and_rescale(vm_crop, fm_crop, img_crop)\n\n vm_crop = np.stack(vm_crop, axis=0) # Seq_len * h * w\n vm_crop_gt = np.stack(vm_crop_gt, axis=0) # Seq_len * h * w\n vm_no_crop = np.stack(vm_no_crop, axis=0) # Seq_len * H * W\n fm_crop = np.stack(fm_crop, axis=0) # Seq_len * h * w\n fm_no_crop = np.stack(fm_no_crop, axis=0) # Seq_len * H * W\n\n vm_crop = torch.from_numpy(np.array(vm_crop)).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(np.array(vm_crop_gt)).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n fm_crop = torch.from_numpy(np.array(fm_crop)).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n\n vm_pad = torch.from_numpy(np.array(vm_pad)).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(np.array(vm_scale)).to(self.dtype).to(self.device)\n\n video_ids = torch.from_numpy(np.array(video_ids)).to(self.dtype).to(self.device)\n object_ids = torch.from_numpy(np.array(object_ids)).to(self.dtype).to(self.device)\n frame_ids = torch.from_numpy(np.array(frame_ids)).to(self.dtype).to(self.device)\n counts = torch.from_numpy(np.array(counts)).to(self.dtype).to(self.device)\n loss_mask_weight = torch.from_numpy(np.array(loss_mask_weight)).to(self.dtype).to(self.device) \n obj_position = torch.from_numpy(np.array(obj_position)).to(self.dtype).to(self.device)\n\n obj_data = {\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"vm_no_crop\": vm_no_crop,\n \"fm_crop\": fm_crop,\n \"fm_no_crop\": fm_no_crop,\n \"img_crop\": img_crop,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"video_ids\": video_ids,\n \"object_ids\": object_ids,\n \"frame_ids\": frame_ids,\n \"counts\": counts,\n \"loss_mask\": loss_mask_weight, \n \"obj_position\": obj_position,\n }\n\n return obj_data\n\n def crop_and_rescale(self, vm_crop, fm_crop_vm=None, img_crop=None):\n h, w = np.array([m.shape for m in vm_crop]).max(axis=0)\n vm_pad = []\n vm_scale = []\n vm_crop_gt = []\n\n for i, m in enumerate(vm_crop):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n vm_pad.append(np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)]))\n vm_scale.append(np.array([self.patch_h/h, self.patch_w/w]))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n if self.mode==\"train\":\n vm_crop[i] = self.data_augmentation(m)\n vm_crop_gt.append(m)\n else:\n vm_crop[i] = m\n vm_crop_gt.append(m)\n\n for i, m in enumerate(fm_crop_vm):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n fm_crop_vm[i] = m\n\n for i, img_ in enumerate(img_crop):\n img_ = transform.rescale(img_, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n 
img_crop[i] = img_\n\n vm_pad = np.stack(vm_pad)\n vm_scale = np.stack(vm_scale)\n return vm_crop, vm_crop_gt, fm_crop_vm, img_crop, vm_pad, vm_scale\n \n def getImg(self, v_id):\n imgs = []\n imgs_list = os.listdir(os.path.join(self.img_path, v_id))\n imgs_list.sort()\n for sub_path in imgs_list:\n img_path = os.path.join(self.img_path, v_id, sub_path)\n img_tmp = plt.imread(img_path)\n imgs.append(img_tmp)\n assert len(imgs) == 128\n return imgs\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n for item in sample_loader:\n yield item\n \n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n return res\n \n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.1 and rdv < 0.6:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.5:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.5 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 1)\n if rdv_2 <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)" }, { "identifier": "MOViD_A", "path": "data/dataloader_MOViD_A.py", "snippet": "class MOViD_A(object):\n def __init__(self, config, mode):\n super(MOViD_A, self).__init__()\n self.mode = mode\n self.dtype = torch.float32\n self.device = \"cpu\"\n root_path = config.root_path\n self.data_dir = os.path.join(root_path, mode)\n \n self.instance_list = np.genfromtxt(\n os.path.join(root_path, \"{}_instance.txt\".format(mode)),\n dtype=np.str,\n encoding='utf-8'\n )\n\n self.train_seq_len = 24\n self.cur_vid = None\n self.patch_h = config.patch_H\n self.patch_w = config.patch_W\n self.enlarge_coef = config.enlarge_coef\n\n def __len__(self):\n return len(self.instance_list)\n\n def __getitem__(self, idx, specified_V_O_id=None):\n # whether choose a specific instance to load\n if specified_V_O_id is None:\n v_id, obj_id, value = self.instance_list[idx].split(\"_\")\n else:\n v_id, obj_id, value = specified_V_O_id.split(\"_\")\n v_id, obj_id, value = int(v_id), int(obj_id), int(value)\n if v_id != self.cur_vid:\n self.cur_vid = v_id\n self.video_path = os.path.join(self.data_dir, str(v_id))\n metadata = self.read_json(os.path.join(self.video_path, 'metadata.json'))\n\n self.num_frames = metadata[\"metadata\"][\"num_frames\"]\n self.height = metadata['metadata']['height']\n self.width = metadata['metadata']['width']\n self.instances = [self.format_instance_information(obj) for obj in metadata[\"instances\"]]\n\n vis_mask_paths = [os.path.join(self.video_path, \"segmentation_full_{}.png\".format(str(f).zfill(5))) for f in range(self.num_frames)]\n vis_mask = [np.array(Image.open(frame_path)) for frame_path in vis_mask_paths] #[t,h,w]\n\n full_mask_paths = [os.path.join(self.video_path, \"segmentation_{}_{}.png\".format(obj_id, str(f).zfill(5))) for f in range(self.num_frames)]\n full_mask = [np.array(Image.open(frame_path)) for frame_path in full_mask_paths] #[t,h,w]\n \n rgb_img_path = [os.path.join(self.video_path, 
\"rgba_full_{}.png\".format(str(f).zfill(5))) for f in range(self.num_frames)]\n rgb_img = [np.array(Image.open(frame_path))[...,:3] for frame_path in rgb_img_path]\n \n counts = []\n obj_position = []\n\n vm_crop = []\n vm_no_crop = []\n fm_crop = []\n fm_no_crop = []\n loss_mask_weight = []\n img_crop = []\n # for evaluation \n video_ids = []\n object_ids = []\n frame_ids = []\n\n timesteps = self.instances[obj_id]['bbox_frames']\n start_t, end_t = 0, 23\n if self.mode != \"test\" and end_t - start_t > self.train_seq_len - 1:\n start_t = np.random.randint(start_t, end_t-(self.train_seq_len-2))\n end_t = start_t + self.train_seq_len - 1\n\n for t_step in range(start_t, end_t+1):\n Image_H, Image_W = self.height, self.width\n # some objects will move out the field of view in some frames\n if t_step in timesteps:\n index = self.instances[obj_id][\"bbox_frames\"].index(t_step)\n xmin, ymin, xmax, ymax = self.instances[obj_id][\"bboxes\"][index]\n vx_min, vy_min, vx_max, vy_max = int(Image_H*xmin), int(Image_W*ymin), int(Image_H*xmax), int(Image_W*ymax)\n counts.append(1)\n else:\n bboxs = mask_find_bboxs(full_mask[t_step].astype(np.uint8))\n \n if bboxs.size==0:\n vx_min, vy_min, vx_max, vy_max = 0, 0, 256, 256\n else:\n b = bboxs[-1][:4]\n vx_min, vy_min, vx_max, vy_max = b[1], b[0], b[1]+b[3], b[0]+b[2]\n counts.append(0)\n\n # enlarge the bbox\n x_center = (vx_min + vx_max) // 2\n y_center = (vy_min + vy_max) // 2\n x_len = int((vx_max - vx_min) * self.enlarge_coef)\n y_len = int((vy_max - vy_min) * self.enlarge_coef)\n vx_min = max(0, x_center - x_len // 2)\n vx_max = min(Image_H, x_center + x_len // 2)\n vy_min = max(0, y_center - y_len // 2)\n vy_max = min(Image_W, y_center + y_len // 2)\n\n obj_position.append([vx_min, vx_max, vy_min, vy_max])\n\n # get mask\n vm = vis_mask[t_step]\n vm_crop.append(vm[vx_min:vx_max+1, vy_min:vy_max+1]==value)\n vm_no_crop.append(vm==value)\n\n fm = full_mask[t_step]\n fm_crop.append(fm[vx_min:vx_max+1, vy_min:vy_max+1]==value)\n fm_no_crop.append(fm==value)\n \n # get image\n image = rgb_img[t_step]\n img_crop.append(image[vx_min:vx_max+1, vy_min:vy_max+1])\n\n # get loss mask\n fore_ground = vm == 0\n obj_ground = vm==value\n loss_mask = np.logical_or(fore_ground, obj_ground)\n\n loss_mask_weight.append(loss_mask)\n\n # for evaluation\n video_ids.append(v_id)\n object_ids.append(obj_id)\n frame_ids.append(t_step)\n\n obj_position = torch.from_numpy(np.array(obj_position)).to(self.dtype).to(self.device)\n \n vm_crop, fm_crop, vm_pad, vm_scale, vm_crop_gt, img_crop = self.crop_and_rescale(vm_crop, fm_crop, img_crop)\n\n vm_crop = np.stack(vm_crop, axis=0) # Seq_len * h * w\n vm_no_crop = np.stack(vm_no_crop, axis=0) # Seq_len * H * W\n # fm_crop = np.stack(fm_crop, axis=0) # Seq_len * h * w\n fm_crop = np.stack(fm_crop, axis=0) # Seq_len * h * w\n fm_no_crop = np.stack(fm_no_crop, axis=0) # Seq_len * H * W\n img_crop = np.stack(img_crop, axis=0) # Sqe_len * H * W\n\n vm_crop = torch.from_numpy(np.array(vm_crop)).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n fm_crop = torch.from_numpy(np.array(fm_crop)).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n\n vm_pad = torch.from_numpy(np.array(vm_pad)).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(np.array(vm_scale)).to(self.dtype).to(self.device)\n\n video_ids = 
torch.from_numpy(np.array(video_ids)).to(self.dtype).to(self.device)\n object_ids = torch.from_numpy(np.array(object_ids)).to(self.dtype).to(self.device)\n frame_ids = torch.from_numpy(np.array(frame_ids)).to(self.dtype).to(self.device)\n counts = torch.from_numpy(np.array(counts)).to(self.dtype).to(self.device)\n loss_mask_weight = torch.from_numpy(np.array(loss_mask_weight)).to(self.dtype).to(self.device) \n obj_position = torch.from_numpy(np.array(obj_position)).to(self.dtype).to(self.device)\n\n obj_data = {\n \"vm_crop\": vm_crop,\n \"vm_no_crop\": vm_no_crop,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n\n \"img_crop\": img_crop,\n \n \"fm_crop\": fm_crop,\n \"fm_no_crop\": fm_no_crop,\n\n \"obj_position\": obj_position, \n \"loss_mask\": loss_mask_weight, \n \"counts\": counts,\n \"video_ids\": video_ids,\n \"object_ids\": object_ids,\n \"frame_ids\": frame_ids,\n }\n\n return obj_data\n\n def crop_and_rescale(self, vm_crop, fm_crop=None,img_crop=None):\n h, w = np.array([m.shape for m in vm_crop]).max(axis=0)\n vm_pad = []\n vm_crop_gt = []\n vm_scale = []\n for i, img in enumerate(img_crop):\n img = transform.rescale(img, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img = np.pad(img, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop[i] = img\n\n for i, m in enumerate(vm_crop):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n if self.mode==\"train\":\n vm_crop[i] = self.data_augmentation(m)\n else:\n vm_crop[i] = m\n vm_crop_gt.append(m)\n vm_pad.append(np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)]))\n vm_scale.append(np.array([self.patch_h/h, self.patch_w/w]))\n\n for i, m in enumerate(fm_crop):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n fm_crop[i] = m\n\n vm_pad = np.stack(vm_pad)\n vm_scale = np.stack(vm_scale)\n return vm_crop, fm_crop, vm_pad, vm_scale, vm_crop_gt,img_crop\n \n def read_json(self,dir_):\n with open(dir_) as f:\n data = json.load(f)\n return data\n\n def format_instance_information(self, obj):\n return {\n \"bboxes\": obj[\"bboxes\"],\n \"bbox_frames\": obj[\"bbox_frames\"],\n }\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n return res\n \n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.1 and rdv < 0.6:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.5:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.5 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 
1)\n if rdv_2 <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)" }, { "identifier": "Kins_Fusion_dataset", "path": "data/dataloader_KINS.py", "snippet": "class Kins_Fusion_dataset(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(Kins_Fusion_dataset, self).__init__()\n self.config = config\n self.mode = mode\n self.root_path = config.root_path\n \n # Load Fusion dataset\n self.data_info = pickle.load(open(os.path.join(self.root_path, \"fusion_{}.pkl\".format(self.mode)), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"c2f_seg_{}_list.txt\".format(self.mode)), dtype=np.str, encoding='utf-8')\n self.img_root_path = os.path.join(self.root_path, \"{}ing\".format(mode),\"image_2\")\n \n # Load the GT of AISFormer\n if mode==\"train\":\n aisformer_gt = cvb.load(os.path.join(self.root_path, \"instances_train.json\"))\n else:\n aisformer_gt = cvb.load(os.path.join(self.root_path, \"instances_val_upate.json\"))\n annotations = aisformer_gt[\"annotations\"]\n images = aisformer_gt[\"images\"]\n self.images, self.annotations = self.make_json_dict(images, annotations)\n \n # Load the GT of vanilla KINS\n self.base_img_path = os.path.join(self.root_path, \"{}ing\".format(mode), \"image_2\")\n self.base_ann_path= os.path.join(self.root_path, \"update_{}_2020.json\".format(mode))\n annotations = cvb.load(self.base_ann_path)\n imgs_info = annotations['images']\n anns_info = annotations[\"annotations\"]\n self.imgs_dict, self.anns_dict = self.make_json_dict(imgs_info, anns_info)\n\n # dataloader setting\n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n\n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def load_item(self, index):\n # load aisformer predicted visible masks\n if \"aisformer\" in self.label_info[index]:\n dataset_name, image_id, anno_id = self.label_info[index].split(\",\")\n image_id, anno_id = int(image_id), int(anno_id)\n # add image information\n img_name = self.images[image_id]\n img_path = os.path.join(self.img_root_path, img_name)\n # img_path = os.path.join(self.img_root_path, str(image_id).zfill(6)+ \".png\")\n img = np.array(Image.open(img_path))\n instances = self.data_info['{}_{}'.format(dataset_name, image_id)][anno_id]\n segmentation = instances[\"pred_visible_mask\"]\n height, width = segmentation[\"size\"]\n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n rles = mask_utils.frPyObjects(instances[\"gt_full_mask\"], height, width)\n fm_no_crop = mask_utils.decode(mask_utils.merge(rles)).astype(bool)\n fm_no_crop = fm_no_crop[..., np.newaxis]\n\n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n \n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, 
y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n \n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n \n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n vm_no_crop_gt = torch.from_numpy(np.array(vm_no_crop_gt)).to(self.dtype).to(self.device)\n\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop_gt,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": 
vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n }\n return meta\n else:\n img_id, anno_id, category_id = self.label_info[index].split(\"_\")\n img_id, anno_id, category_id = int(img_id), int(anno_id), int(category_id)\n\n img_name = self.imgs_dict[img_id]\n img_path = os.path.join(self.base_img_path, img_name)\n \n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n height, width, _ = img.shape\n \n ann = self.anns_dict[img_id][anno_id]\n fm_no_crop = self.polys_to_mask(ann[\"a_segm\"], height, width)\n vm_no_crop = self.polys_to_mask(ann[\"i_segm\"], height, width)\n if np.sum(vm_no_crop)==0:\n counts = np.array([0])\n else:\n counts = np.array([1])\n y_min, x_min, w, h = ann[\"i_bbox\"]\n\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n \n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1].astype(bool)\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n loss_mask = fm_no_crop-vm_no_crop\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n # vm_crop here is the GT\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n img_id = 
torch.from_numpy(np.array(img_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n # category_id = torch.from_numpy(np.array(category_id)).to(self.dtype).to(self.device)\n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop,\n \"fm_no_crop\": fm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n }\n return meta\n\n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.2:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.2 and rdv <0.6:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.55:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.55 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 1)\n if rdv_2 <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)\n \n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def make_json_dict(self, imgs, anns):\n imgs_dict = {}\n anns_dict = {}\n for ann in anns:\n image_id = ann[\"image_id\"]\n if not image_id in anns_dict:\n anns_dict[image_id] = []\n anns_dict[image_id].append(ann)\n else:\n anns_dict[image_id].append(ann)\n \n for img in imgs:\n image_id = img['id']\n imgs_dict[image_id] = img['file_name']\n\n return imgs_dict, anns_dict\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask" }, { "identifier": "KINS_Aisformer_VRSP_Intersection", "path": "data/dataloader_KINS.py", "snippet": "class KINS_Aisformer_VRSP_Intersection(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(KINS_Aisformer_VRSP_Intersection, self).__init__()\n self.config = config\n self.mode = mode\n self.root_path = config.root_path\n \n # Load Intersection dataset\n self.data_info = pickle.load(open(os.path.join(self.root_path, \"kins_intersection.pkl\"), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"kins_intersection_list.txt\"), dtype=np.str, encoding='utf-8')\n if mode==\"train\":\n aisformer_gt = cvb.load(os.path.join(self.root_path, \"instances_train.json\"))\n else:\n aisformer_gt = 
cvb.load(os.path.join(self.root_path, \"instances_val_upate.json\"))\n annotations = aisformer_gt[\"annotations\"]\n images = aisformer_gt[\"images\"]\n self.images, self.annotations = self.make_json_dict(images, annotations)\n self.img_root_path = os.path.join(self.root_path, \"{}ing\".format(mode), \"image_2\")\n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n \n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def mask_find_bboxs(self, mask):\n retval, labels, stats, centroids = cv2.connectedComponentsWithStats(mask, connectivity=8)\n stats = stats[stats[:,4].argsort()]\n return stats\n \n def generate_heatmap(self, mask, kernel, sigma):\n heatmap = cv2.GaussianBlur(mask, kernel, sigma)\n am = np.amax(heatmap)\n heatmap /= am / 1\n return heatmap\n \n def load_item(self, index):\n image_id, anno_id = self.label_info[index].split(\"_\")\n image_id, anno_id = int(image_id), int(anno_id)\n instances = self.data_info[image_id][anno_id]\n\n segmentation = instances[\"pred_visible_mask\"]\n height, width = segmentation[\"size\"]\n # add image information\n img_name = self.images[image_id]\n img_path = os.path.join(self.img_root_path, img_name)\n # img_path = os.path.join(self.img_root_path, str(image_id).zfill(6)+ \".png\")\n img = Image.open(img_path)\n img = img.resize((width,height), Image.ANTIALIAS)\n img = np.array(img)\n \n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n # fm_no_crop = mask_utils.decode([instances[\"gt_full_mask\"]]).astype(bool)\n rles = mask_utils.frPyObjects(instances[\"gt_full_mask\"], height, width)\n fm_no_crop = mask_utils.decode(mask_utils.merge(rles)).astype(bool)\n \n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n\n x_center_crop = x_center - x_min\n y_center_crop = y_center - y_min\n \n fm_no_crop = fm_no_crop[..., np.newaxis]\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n \n center_crop = np.zeros_like(vm_crop[0])\n x_center_crop = int(x_center_crop*self.patch_h/h)\n y_center_crop = int(y_center_crop*self.patch_w/w)\n center_crop[x_center_crop: x_center_crop+1, y_center_crop: y_center_crop+1]=1\n center_crop = self.generate_heatmap(center_crop.astype(np.float), (35, 35), 9)\n center_crop = center_crop[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, 
max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n refine_loss_mask = 1 - (vm_crop_gt==vm_crop).astype(bool)\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n # import pdb;pdb.set_trace()\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop_gt = torch.from_numpy(vm_no_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n refine_loss_mask = torch.from_numpy(np.array(refine_loss_mask)).to(self.dtype).to(self.device)\n center_crop = torch.from_numpy(np.array(center_crop)).to(self.dtype).to(self.device)\n \n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n img = torch.from_numpy(np.array(img)).to(self.dtype).to(self.device)\n\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n # \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n }\n # elif self.mode==\"test\":\n # meta = {\n # # \"vm_no_crop\": vm_no_crop,\n # \"vm_crop\": vm_crop,\n # \"vm_crop_gt\": vm_crop_gt,\n # # \"vm_no_crop_gt\": vm_no_crop_gt,\n # # \"refine_loss_mask\": refine_loss_mask,\n # # \"fm_no_crop\": fm_no_crop,\n # \"fm_crop\": fm_crop,\n # \"img_crop\": img_crop,\n # # \"loss_mask\": loss_mask,\n # # \"obj_position\": obj_position,\n # # \"vm_pad\": vm_pad,\n # # \"vm_scale\": vm_scale,\n # # \"counts\":counts,\n # # \"img_id\": image_id,\n # # \"anno_id\": 
anno_id,\n # # # for vq\n # # # \"mask_crop\": fm_crop\n # # # \"img\":img,\n # }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"vm_no_crop_gt\": vm_no_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n \"img\":img,\n }\n return meta\n\n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask\n \n def make_json_dict(self, imgs, anns):\n imgs_dict = {}\n anns_dict = {}\n for ann in anns:\n image_id = ann[\"image_id\"]\n if not image_id in anns_dict:\n anns_dict[image_id] = []\n anns_dict[image_id].append(ann)\n else:\n anns_dict[image_id].append(ann)\n \n for img in imgs:\n image_id = img['id']\n imgs_dict[image_id] = img['file_name']\n\n return imgs_dict, anns_dict" }, { "identifier": "COCOA_Fusion_dataset", "path": "data/dataloader_COCOA.py", "snippet": "class COCOA_Fusion_dataset(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(COCOA_Fusion_dataset, self).__init__()\n self.config = config\n self.mode = mode\n self.root_path = config.root_path\n \n # Load Fusion dataset \n self.data_info = pickle.load(open(os.path.join(self.root_path, \"fusion_{}.pkl\".format(self.mode)), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"c2f_seg_{}_list.txt\".format(self.mode)), dtype=np.str, encoding='utf-8')\n \n if mode==\"train\":\n train_label = cvb.load(os.path.join(self.root_path, \"COCO_amodal_train2014_with_classes.json\"))\n self.anns_dict = train_label[\"annotations\"]\n self.img_root_path = os.path.join(self.root_path, \"train2014\")\n elif mode==\"test\":\n val_label = cvb.load(os.path.join(self.root_path, \"COCO_amodal_val2014_with_classes.json\"))\n self.anns_dict = val_label[\"annotations\"]\n self.img_root_path = os.path.join(self.root_path, \"val2014\")\n \n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n\n \n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def load_item(self, index):\n # predicted vm\n if len(self.label_info[index].split(\",\"))==3:\n dataset_name, image_id, anno_id = self.label_info[index].split(\",\")\n image_id, anno_id = int(image_id), int(anno_id)\n if self.mode==\"train\":\n img_path = os.path.join(self.img_root_path, \"COCO_{}2014_{}.jpg\".format(self.mode, str(image_id).zfill(12)))\n elif self.mode==\"test\":\n img_path = os.path.join(self.img_root_path, \"COCO_val2014_{}.jpg\".format(str(image_id).zfill(12)))\n img = np.array(Image.open(img_path))\n if len(img.shape)==2:\n img = np.repeat(img[:, :, 
np.newaxis], 3, axis=2)\n instances = self.data_info[\"{}_{}\".format(dataset_name, image_id)][anno_id]\n segmentation = instances[\"pred_visible_mask\"]\n height, weight = segmentation[\"size\"]\n # occlude_rate = instances[\"occlude_rate\"]\n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n fm_no_crop = mask_utils.decode([instances[\"gt_full_mask\"]]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n\n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(weight, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n # import pdb;pdb.set_trace()\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n # if self.mode==\"test\":\n # loss_mask = mask_utils.decode([instances[\"loss_mask\"]]).astype(bool)[...,0]\n # else:\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = 
torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n img = torch.from_numpy(np.array(img)).to(self.dtype).to(self.device)\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n # occlude_rate = torch.from_numpy(np.array(occlude_rate)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n # \"vm_crop\": vm_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop_gt,\n # \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n # \"img_no_crop\": img,\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"img_crop\": img_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # \"occlude_rate\":occlude_rate\n # for vq\n # \"mask_crop\": fm_crop\n # \"img_no_crop\": img,\n }\n return meta\n # gt vm\n elif len(self.label_info[index].split(\",\"))==2:\n anno_id, img_path = self.label_info[index].split(\",\")\n anno_id = int(anno_id)\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n height, width, _ = img.shape\n\n ann = self.anns_dict[anno_id]\n img_id = ann[\"image_id\"]\n # category_id = ann[\"category_id\"]\n\n full_mask = ann[\"segmentation\"]\n fm_no_crop = mask_utils.decode(full_mask)[...,np.newaxis]\n\n visible_mask = ann[\"visible_mask\"]\n vm_no_crop = mask_utils.decode(visible_mask)[...,np.newaxis]\n\n if np.sum(vm_no_crop)==0:\n counts = np.array([0])\n else:\n counts = np.array([1])\n y_min, x_min, w, h = ann[\"bbox\"]\n y_max, x_max = y_min + w, x_min + h\n y_min, x_min, y_max, x_max = int(y_min), int(x_min), int(y_max), int(x_max) \n\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n \n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, 
max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n\n # full_pad = ((0, max(375-height, 0)), (0, max(1242-width, 0)))\n # vm_no_crop = np.pad(vm_no_crop, full_pad)[:375, :1242]\n # fm_no_crop = np.pad(fm_no_crop, full_pad)[:375, :1242]\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n loss_mask = fm_no_crop-vm_no_crop\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n \n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n img_crop = torch.from_numpy(img_crop).to(self.dtype).to(self.device)\n img = torch.from_numpy(img).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n \n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n img_id = torch.from_numpy(np.array(img_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n # category_id = torch.from_numpy(np.array(category_id)).to(self.dtype).to(self.device)\n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\": counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # \"category_id\": category_id,\n # for vq\n # \"mask_crop\": fm_crop\n # \"img_no_crop\": img\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # \"category_id\": category_id,\n # for vq\n # \"mask_crop\": fm_crop\n \"img_no_crop\": img,\n }\n return meta\n \n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n 
dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask\n\n # def data_augmentation(self, mask):\n # return mask\n \n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.2:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.2 and rdv <0.9:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.6:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.6 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 1)\n if rdv_2 <= 0.2:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)\n \n def make_json_dict(self, imgs, anns):\n imgs_dict = {}\n anns_dict = {}\n for ann in anns:\n image_id = ann[\"image_id\"]\n if not image_id in anns_dict:\n anns_dict[image_id] = []\n anns_dict[image_id].append(ann)\n else:\n anns_dict[image_id].append(ann)\n \n for img in imgs:\n image_id = img['id']\n imgs_dict[image_id] = img['file_name']\n\n return imgs_dict, anns_dict" }, { "identifier": "COCOA_VRSP", "path": "data/dataloader_COCOA.py", "snippet": "class COCOA_VRSP(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(COCOA_VRSP, self).__init__()\n self.config = config\n self.mode = mode\n self.data_info = pickle.load(open(os.path.join(self.root_path, \"fusion_{}.pkl\".format(self.mode)), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"c2f_seg_{}_list.txt\".format(self.mode)), dtype=np.str, encoding='utf-8')\n \n if self.mode==\"train\":\n self.img_root_path = os.path.join(self.root_path, \"train2014\")\n elif self.mode==\"test\":\n self.img_root_path = os.path.join(self.root_path, \"val2014\")\n\n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n\n \n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def generate_heatmap(self, mask, kernel, sigma):\n heatmap = cv2.GaussianBlur(mask, kernel, sigma)\n am = np.amax(heatmap)\n heatmap /= am / 1\n return heatmap\n \n def load_item(self, index):\n image_id, anno_id = self.label_info[index].split(\"_\")\n image_id, anno_id = int(image_id), int(anno_id)\n if self.mode==\"train\":\n img_path = os.path.join(self.img_root_path, \"COCO_{}2014_{}.jpg\".format(self.mode, str(image_id).zfill(12)))\n elif self.mode==\"test\":\n img_path = os.path.join(self.img_root_path, \"COCO_val2014_{}.jpg\".format(str(image_id).zfill(12)))\n img = np.array(Image.open(img_path))\n if len(img.shape)==2:\n img = np.repeat(img[:, :, np.newaxis], 3, axis=2)\n instances = self.data_info[image_id][anno_id]\n segmentation = instances[\"pred_visible_mask\"]\n height, weight = segmentation[\"size\"]\n occlude_rate = instances[\"occlude_rate\"]\n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n fm_no_crop = mask_utils.decode([instances[\"gt_full_mask\"]]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n\n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = 
(x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(weight, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n \n x_center_crop = x_center - x_min\n y_center_crop = y_center - y_min\n\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n center_crop = np.zeros_like(vm_crop[0])\n x_center_crop = int(x_center_crop*self.patch_h/h)\n y_center_crop = int(y_center_crop*self.patch_w/w)\n center_crop[x_center_crop: x_center_crop+1, y_center_crop: y_center_crop+1]=1\n center_crop = self.generate_heatmap(center_crop.astype(np.float), (35, 35), 9)\n center_crop = center_crop[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n center_crop = torch.from_numpy(np.array(center_crop)).to(self.dtype).to(self.device)\n \n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n img = 
torch.from_numpy(np.array(img)).to(self.dtype).to(self.device)\n\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n occlude_rate = torch.from_numpy(np.array(occlude_rate)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n # \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n \"img_no_crop\": img,\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop_gt,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n \"occlude_rate\":occlude_rate,\n # # for vq\n # \"mask_crop\": fm_crop,\n \"img\": img,\n }\n return meta\n\n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask" } ]
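The `collate_fn` in the COCOA snippets above collates each key independently and keeps a key as `None` when no sample in the batch provides it, instead of letting `default_collate` fail. Below is a minimal, self-contained sketch of that pattern; the key names mirror the `meta` dicts above, but the toy tensor shapes are arbitrary and only for illustration.

```python
import torch
from torch.utils.data.dataloader import default_collate

def collate_fn(batch):
    # Collate per key; keep None for keys that no sample provides
    # (same pattern as COCOA_VRSP.collate_fn in the snippet above).
    keys = batch[0].keys()
    res = {}
    for k in keys:
        vals = [b[k] for b in batch if b[k] is not None]
        res[k] = default_collate(vals) if len(vals) > 0 else None
    return res

batch = [
    {"vm_crop": torch.zeros(1, 256, 256), "counts": torch.tensor([1.0]), "loss_mask": None},
    {"vm_crop": torch.ones(1, 256, 256), "counts": torch.tensor([1.0]), "loss_mask": None},
]
out = collate_fn(batch)
print(out["vm_crop"].shape)  # torch.Size([2, 1, 256, 256])
print(out["loss_mask"])      # None
```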
from data.dataloader_Fishbowl import FishBowl
from data.dataloader_MOViD_A import MOViD_A
from data.dataloader_KINS import Kins_Fusion_dataset, KINS_Aisformer_VRSP_Intersection
from data.dataloader_COCOA import COCOA_Fusion_dataset, COCOA_VRSP
21,149
def load_dataset(config, args, mode):
    if mode=="train":
        if args.dataset=="KINS":
def load_dataset(config, args, mode):
    if mode=="train":
        if args.dataset=="KINS":
train_dataset = Kins_Fusion_dataset(config, mode='train')
2
2023-12-21 04:25:47+00:00
24k
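The record above pairs `import_statement` and the tail of `cropped_code` with a single gold `next_line`. A hedged sketch of how a consumer of this dataset might assemble a completion prompt and score a prediction follows; the prompt layout and exact-match scoring are assumptions for illustration, not something the record itself specifies.

```python
record = {
    "import_statement": (
        "from data.dataloader_Fishbowl import FishBowl\n"
        "from data.dataloader_MOViD_A import MOViD_A\n"
        "from data.dataloader_KINS import Kins_Fusion_dataset, KINS_Aisformer_VRSP_Intersection\n"
        "from data.dataloader_COCOA import COCOA_Fusion_dataset, COCOA_VRSP"
    ),
    "cropped_code": (
        "def load_dataset(config, args, mode):\n"
        '    if mode=="train":\n'
        '        if args.dataset=="KINS":'
    ),
    "next_line": "train_dataset = Kins_Fusion_dataset(config, mode='train')",
}

# Prompt = imports + local context; the model is asked to produce the next source line.
prompt = record["import_statement"] + "\n\n" + record["cropped_code"] + "\n"

predicted = "train_dataset = Kins_Fusion_dataset(config, mode='train')"  # stand-in for a model output
print(predicted.strip() == record["next_line"].strip())  # True -> exact match
```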
alipay/PainlessInferenceAcceleration
pia/lookahead/models/baichuan/modeling_baichuan.py
[ { "identifier": "LookaheadPreTrainedModel", "path": "pia/lookahead/common/pretrained_model.py", "snippet": "class LookaheadPreTrainedModel(PreTrainedModel):\n _batch_generation = False\n _stream_generation = False\n\n def __init__(self, config):\n super().__init__(config=config)\n\n def _get_generation_mode(\n self, generation_config: GenerationConfig, assistant_model: Optional[\"PreTrainedModel\"]\n ) -> GenerationMode:\n \"\"\"\n Returns the generation mode triggered by a [`GenerationConfig`] instance.\n \"\"\"\n if generation_config.constraints is not None or generation_config.force_words_ids is not None:\n generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH\n elif generation_config.num_beams == 1:\n if generation_config.do_sample is False:\n if (\n generation_config.top_k is not None\n and generation_config.top_k > 1\n and generation_config.penalty_alpha is not None\n and generation_config.penalty_alpha > 0\n ):\n generation_mode = GenerationMode.CONTRASTIVE_SEARCH\n elif generation_config.use_cache \\\n and hasattr(generation_config, 'decoding_kwargs') \\\n and generation_config.decoding_kwargs.get('use_lookahead', False) \\\n and generation_config.decoding_kwargs.get('decoding_length', 64) > 1 \\\n and generation_config.decoding_kwargs.get('branch_length', 12) > 0:\n generation_mode = GenerationMode.LOOKAHEAD_GENERATION\n else:\n generation_mode = GenerationMode.GREEDY_SEARCH\n else:\n if generation_config.use_cache \\\n and hasattr(generation_config, 'decoding_kwargs') \\\n and generation_config.decoding_kwargs.get('use_lookahead', False) \\\n and generation_config.decoding_kwargs.get('decoding_length', 64) > 1 \\\n and generation_config.decoding_kwargs.get('branch_length', 12) > 0:\n generation_mode = GenerationMode.LOOKAHEAD_GENERATION\n else:\n generation_mode = GenerationMode.SAMPLE\n else:\n if generation_config.num_beam_groups > 1:\n generation_mode = GenerationMode.GROUP_BEAM_SEARCH\n elif generation_config.do_sample is True:\n generation_mode = GenerationMode.BEAM_SAMPLE\n else:\n generation_mode = GenerationMode.BEAM_SEARCH\n\n # Assisted generation may extend some generation modes\n if assistant_model is not None:\n if generation_mode in (\"greedy_search\", \"sample\"):\n generation_mode = GenerationMode.ASSISTED_GENERATION\n else:\n raise ValueError(\n \"You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate \"\n \"is only supported with Greedy Search and Sample.\"\n )\n return generation_mode\n\n @torch.no_grad()\n def generate(\n self,\n inputs: Optional[torch.Tensor] = None,\n generation_config: Optional[GenerationConfig] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,\n synced_gpus: Optional[bool] = None,\n assistant_model: Optional[\"PreTrainedModel\"] = None,\n streamer: Optional[\"BaseStreamer\"] = None,\n **kwargs,\n ) -> Union[GenerateOutput, torch.LongTensor]:\n r\"\"\"\n\n Generates sequences of token ids for models with a language modeling head.\n\n <Tip warning={true}>\n\n Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the\n model's default generation configuration. You can override any `generation_config` by passing the corresponding\n parameters to generate(), e.g. 
`.generate(inputs, num_beams=4, do_sample=True)`.\n\n For an overview of generation strategies and code examples, check out the [following\n guide](../generation_strategies).\n\n </Tip>\n\n Parameters:\n inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):\n The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the\n method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`\n should of in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of\n `input_ids`, `input_values`, `input_features`, or `pixel_values`.\n generation_config (`~generation.GenerationConfig`, *optional*):\n The generation configuration to be used as base parametrization for the generation call. `**kwargs`\n passed to generate matching the attributes of `generation_config` will override them. If\n `generation_config` is not provided, the default will be used, which had the following loading\n priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model\n configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s\n default values, whose documentation should be checked to parameterize generation.\n logits_processor (`LogitsProcessorList`, *optional*):\n Custom logits processors that complement the default logits processors built from arguments and\n generation config. If a logit processor is passed that is already created with the arguments or a\n generation config an error is thrown. This feature is intended for advanced users.\n stopping_criteria (`StoppingCriteriaList`, *optional*):\n Custom stopping criteria that complement the default stopping criteria built from arguments and a\n generation config. If a stopping criteria is passed that is already created with the arguments or a\n generation config an error is thrown. This feature is intended for advanced users.\n prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):\n If provided, this function constraints the beam search to allowed tokens only at each step. If not\n provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and\n `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned\n on the batch ID `batch_id` and the previously generated tokens `inputs_ids`. This argument is useful\n for constrained generation conditioned on the prefix, as described in [Autoregressive Entity\n Retrieval](https://arxiv.org/abs/2010.00904).\n synced_gpus (`bool`, *optional*):\n Whether to continue running the while loop until max_length. Unless overridden this flag will be set to\n `True` under DeepSpeed ZeRO Stage 3 multiple GPUs environment to avoid hanging if one GPU finished\n generating before other GPUs. Otherwise it'll be set to `False`.\n assistant_model (`PreTrainedModel`, *optional*):\n An assistant model that can be used to accelerate generation. The assistant model must have the exact\n same tokenizer. The acceleration is achieved when forecasting candidate tokens with the assistent model\n is much faster than running generation with the model you're calling generate from. As such, the\n assistant model should be much smaller.\n streamer (`BaseStreamer`, *optional*):\n Streamer object that will be used to stream the generated sequences. 
Generated tokens are passed\n through `streamer.put(token_ids)` and the streamer is responsible for any further processing.\n kwargs (`Dict[str, Any]`, *optional*):\n Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be\n forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder\n specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.\n\n Return:\n [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`\n or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.\n\n If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible\n [`~utils.ModelOutput`] types are:\n\n - [`~generation.GreedySearchDecoderOnlyOutput`],\n - [`~generation.SampleDecoderOnlyOutput`],\n - [`~generation.BeamSearchDecoderOnlyOutput`],\n - [`~generation.BeamSampleDecoderOnlyOutput`]\n\n If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible\n [`~utils.ModelOutput`] types are:\n\n - [`~generation.GreedySearchEncoderDecoderOutput`],\n - [`~generation.SampleEncoderDecoderOutput`],\n - [`~generation.BeamSearchEncoderDecoderOutput`],\n - [`~generation.BeamSampleEncoderDecoderOutput`]\n \"\"\"\n\n if synced_gpus is None:\n # if is_deepspeed_zero3_enabled() and dist.get_world_size() > 1:\n # synced_gpus = True\n # else:\n # synced_gpus = False\n synced_gpus = False\n\n # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call\n self._validate_model_class()\n\n # priority: `generation_config` argument > `model.generation_config` (the default generation config)\n if generation_config is None:\n # legacy: users may modify the model configuration to control generation -- update the generation config\n # model attribute accordingly, if it was created from the model config\n if self.generation_config._from_model_config:\n new_generation_config = GenerationConfig.from_model_config(self.config)\n if new_generation_config != self.generation_config:\n # warnings.warn(\n # \"You have modified the pretrained model configuration to control generation. This is a\"\n # \" deprecated strategy to control generation and will be removed soon, in a future version.\"\n # \" Please use a generation configuration file (see\"\n # \" https://huggingface.co/docs/transformers/main_classes/text_generation )\"\n # )\n self.generation_config = new_generation_config\n generation_config = self.generation_config\n\n generation_config = copy.deepcopy(generation_config)\n model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs\n generation_config.validate()\n self._validate_model_kwargs(model_kwargs.copy())\n if not hasattr(generation_config, 'decoding_kwargs'):\n generation_config.decoding_kwargs = model_kwargs.get('decoding_kwargs', {})\n\n # 2. Set generation parameters if not already defined\n logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n\n if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:\n if model_kwargs.get(\"attention_mask\", None) is None:\n logger.warning(\n \"The attention mask and the pad token id were not set. As a consequence, you may observe \"\n \"unexpected behavior. 
Please pass your input's `attention_mask` to obtain reliable results.\"\n )\n eos_token_id = generation_config.eos_token_id\n if isinstance(eos_token_id, list):\n eos_token_id = eos_token_id[0]\n logger.warning(f\"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.\")\n generation_config.pad_token_id = eos_token_id\n\n # 3. Define model inputs\n # inputs_tensor has to be defined\n # model_input_name is defined if model-specific keyword input is passed\n # otherwise model_input_name is None\n # all model-specific keyword inputs are removed from `model_kwargs`\n inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(\n inputs, generation_config.bos_token_id, model_kwargs\n )\n batch_size = inputs_tensor.shape[0]\n\n # 4. Define other model kwargs\n model_kwargs[\"output_attentions\"] = generation_config.output_attentions\n model_kwargs[\"output_hidden_states\"] = generation_config.output_hidden_states\n # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are\n # generating the first new token or not, and we only want to use the embeddings for the first new token)\n if not self.config.is_encoder_decoder and model_input_name == \"inputs_embeds\":\n model_kwargs[\"use_cache\"] = True\n else:\n model_kwargs[\"use_cache\"] = generation_config.use_cache\n\n accepts_attention_mask = \"attention_mask\" in set(inspect.signature(self.forward).parameters.keys())\n requires_attention_mask = \"encoder_outputs\" not in model_kwargs\n\n if model_kwargs.get(\"attention_mask\", None) is None and requires_attention_mask and accepts_attention_mask:\n model_kwargs[\"attention_mask\"] = self._prepare_attention_mask_for_generation(\n inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id\n )\n\n # decoder-only models should use left-padding for generation\n if not self.config.is_encoder_decoder:\n # If `input_ids` was given, check if the last id in any sequence is `pad_token_id`\n # Note: If using, `inputs_embeds` this check does not work, because we want to be more hands-off.\n if (\n generation_config.pad_token_id is not None\n and len(inputs_tensor.shape) == 2\n and torch.sum(inputs_tensor[:, -1] == generation_config.pad_token_id) > 0\n ):\n logger.warning(\n \"A decoder-only architecture is being used, but right-padding was detected! For correct \"\n \"generation results, please set `padding_side='left'` when initializing the tokenizer.\"\n )\n\n if self.config.is_encoder_decoder and \"encoder_outputs\" not in model_kwargs:\n # if model is encoder decoder encoder_outputs are created\n # and added to `model_kwargs`\n model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(\n inputs_tensor, model_kwargs, model_input_name\n )\n\n # 5. Prepare `input_ids` which will be used for auto-regressive generation\n if self.config.is_encoder_decoder:\n input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(\n batch_size=batch_size,\n model_input_name=model_input_name,\n model_kwargs=model_kwargs,\n decoder_start_token_id=generation_config.decoder_start_token_id,\n bos_token_id=generation_config.bos_token_id,\n device=inputs_tensor.device,\n )\n else:\n input_ids = inputs_tensor if model_input_name == \"input_ids\" else model_kwargs.pop(\"input_ids\")\n\n if streamer is not None:\n streamer.put(input_ids.cpu())\n\n # 6. 
Prepare `max_length` depending on other stopping criteria.\n input_ids_length = input_ids.shape[-1]\n has_default_max_length = kwargs.get(\"max_length\") is None and generation_config.max_length is not None\n if generation_config.max_new_tokens is not None:\n if not has_default_max_length:\n logger.warning(\n f\"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=\"\n f\"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. \"\n \"Please refer to the documentation for more information. \"\n \"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\"\n )\n generation_config.max_length = generation_config.max_new_tokens + input_ids_length\n\n # 7. determine generation mode\n generation_mode = self._get_generation_mode(generation_config, assistant_model)\n\n if streamer is not None and (generation_config.num_beams > 1):\n raise ValueError(\n \"`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1.\"\n )\n\n if self.device.type != input_ids.device.type:\n warnings.warn(\n \"You are calling .generate() with the `input_ids` being on a device type different\"\n f\" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model\"\n f\" is on {self.device.type}. You may experience unexpected behaviors or slower generation.\"\n \" Please make sure that you have put `input_ids` to the\"\n f\" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before\"\n \" running `.generate()`.\",\n UserWarning,\n )\n\n # 8. prepare distribution pre_processing samplers\n logits_processor = self._get_logits_processor(\n generation_config=generation_config,\n input_ids_seq_length=input_ids_length,\n encoder_input_ids=inputs_tensor,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n logits_processor=logits_processor,\n )\n\n # 9. prepare stopping criteria\n stopping_criteria = self._get_stopping_criteria(\n generation_config=generation_config, stopping_criteria=stopping_criteria\n )\n\n decoding_kwargs = generation_config.decoding_kwargs if hasattr(generation_config, 'decoding_kwargs') else {}\n decoding_kwargs['generation_mode'] = generation_mode\n decoding_kwargs['do_sample'] = generation_config.do_sample\n decoding_kwargs['inputs_embeds_position'] = generation_config.inputs_embeds_position if hasattr(generation_config, 'inputs_embeds_position') else 0\n decoding_kwargs['max_length'] = generation_config.max_length\n if generation_mode == GenerationMode.LOOKAHEAD_GENERATION:\n decoding_length = decoding_kwargs.get('decoding_length', 64)\n decoding_kwargs['decoding_max_length'] = generation_config.max_length + decoding_length + 1\n else:\n decoding_kwargs['decoding_max_length'] = generation_config.max_length\n model_kwargs['decoding_kwargs'] = decoding_kwargs\n\n # 10. go into different generation modes\n if generation_mode == GenerationMode.ASSISTED_GENERATION:\n if generation_config.num_return_sequences > 1:\n raise ValueError(\n \"num_return_sequences has to be 1 when doing assisted generate, \"\n f\"but is {generation_config.num_return_sequences}.\"\n )\n if batch_size > 1:\n raise ValueError(\"assisted generate is only supported for batch_size = 1\")\n if not model_kwargs[\"use_cache\"]:\n raise ValueError(\"assisted generate requires `use_cache=True`\")\n\n # 11. 
If the assistant model is an encoder-decoder, prepare its encoder outputs\n if assistant_model.config.is_encoder_decoder:\n assistant_model_kwargs = copy.deepcopy(model_kwargs)\n inputs_tensor, model_input_name, assistant_model_kwargs = assistant_model._prepare_model_inputs(\n inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_model_kwargs\n )\n assistant_model_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation(\n inputs_tensor, assistant_model_kwargs, model_input_name\n )\n model_kwargs[\"assistant_encoder_outputs\"] = assistant_model_kwargs[\"encoder_outputs\"]\n\n # 12. run assisted generate\n return self.assisted_decoding(\n input_ids,\n assistant_model=assistant_model,\n do_sample=generation_config.do_sample,\n logits_processor=logits_processor,\n logits_warper=self._get_logits_warper(generation_config) if generation_config.do_sample else None,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n if generation_mode == GenerationMode.GREEDY_SEARCH:\n # 11. run greedy search\n return self.greedy_search(\n input_ids,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.LOOKAHEAD_GENERATION:\n # 11. run greedy search\n return self.lookahead_generation(\n input_ids,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.CONTRASTIVE_SEARCH:\n if not model_kwargs[\"use_cache\"]:\n raise ValueError(\"Contrastive search requires `use_cache=True`\")\n\n return self.contrastive_search(\n input_ids,\n top_k=generation_config.top_k,\n penalty_alpha=generation_config.penalty_alpha,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n sequential=generation_config.low_memory,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.SAMPLE:\n # 11. prepare logits warper\n logits_warper = self._get_logits_warper(generation_config)\n\n # 12. expand input_ids with `num_return_sequences` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_return_sequences,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n\n # 13. 
run sample\n return self.sample(\n input_ids,\n logits_processor=logits_processor,\n logits_warper=logits_warper,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.BEAM_SEARCH:\n # 11. prepare beam search scorer\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length,\n )\n # 12. interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n # 13. run beam search\n return self.beam_search(\n input_ids,\n beam_scorer,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.BEAM_SAMPLE:\n # 11. prepare logits warper\n logits_warper = self._get_logits_warper(generation_config)\n\n # 12. prepare beam search scorer\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length,\n )\n\n # 13. interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n\n # 14. run beam sample\n return self.beam_sample(\n input_ids,\n beam_scorer,\n logits_processor=logits_processor,\n logits_warper=logits_warper,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.GROUP_BEAM_SEARCH:\n # 11. prepare beam search scorer\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n num_beam_groups=generation_config.num_beam_groups,\n max_length=generation_config.max_length,\n )\n # 12. 
interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n # 13. run beam search\n return self.group_beam_search(\n input_ids,\n beam_scorer,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.CONSTRAINED_BEAM_SEARCH:\n final_constraints = []\n if generation_config.constraints is not None:\n final_constraints = generation_config.constraints\n\n if generation_config.force_words_ids is not None:\n\n def typeerror():\n raise ValueError(\n \"`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]`\"\n f\"of positive integers, but is {generation_config.force_words_ids}.\"\n )\n\n if (\n not isinstance(generation_config.force_words_ids, list)\n or len(generation_config.force_words_ids) == 0\n ):\n typeerror()\n\n for word_ids in generation_config.force_words_ids:\n if isinstance(word_ids[0], list):\n if not isinstance(word_ids, list) or len(word_ids) == 0:\n typeerror()\n if any(not isinstance(token_ids, list) for token_ids in word_ids):\n typeerror()\n if any(\n any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)\n for token_ids in word_ids\n ):\n typeerror()\n\n constraint = DisjunctiveConstraint(word_ids)\n else:\n if not isinstance(word_ids, list) or len(word_ids) == 0:\n typeerror()\n if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids):\n typeerror()\n\n constraint = PhrasalConstraint(word_ids)\n final_constraints.append(constraint)\n\n # 11. prepare beam search scorer\n constrained_beam_scorer = ConstrainedBeamSearchScorer(\n constraints=final_constraints,\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length,\n )\n # 12. interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n # 13. 
run beam search\n return self.constrained_beam_search(\n input_ids,\n constrained_beam_scorer=constrained_beam_scorer,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n def lookahead_prepare_inputs_for_generation(self,\n input_ids,\n past_key_values=None,\n attention_mask=None,\n inputs_embeds=None,\n **kwargs):\n position_ids = kwargs.get(\"position_ids\", None)\n\n decoding_kwargs = kwargs.get('decoding_kwargs', {})\n decoding_length = decoding_kwargs.get('decoding_length', 64)\n branch_length = decoding_kwargs.get('branch_length', 12)\n decoding_mode = decoding_kwargs.get('decoding_mode', 'hier')\n max_length = decoding_kwargs.get('max_length', 2048)\n update_branch_length = min(branch_length, max_length - input_ids.size(-1))\n assert update_branch_length > 0, f'{branch_length=} {max_length=} {input_ids.size(-1)=} {update_branch_length=}'\n\n if past_key_values is None:\n if inputs_embeds is not None and input_ids is not None:\n model_inputs = {\"inputs_embeds\": inputs_embeds, \"input_ids\": input_ids}\n length = input_ids.size(1)\n elif input_ids is not None:\n model_inputs = {\"input_ids\": input_ids}\n length = input_ids.size(1)\n elif inputs_embeds is not None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n length = input_ids.size(1)\n else:\n raise ValueError('either input_ids or inputs_embeds is not None')\n update_attention_mask = attention_mask[:, :, :length, :length]\n\n model_inputs.update(\n {\"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": update_attention_mask,\n \"decoding_kwargs\": decoding_kwargs\n })\n\n if position_ids is not None:\n model_inputs[\"position_ids\"] = self._get_position_ids(position_ids, encoding=True, length=length)\n\n else:\n decoding_qids = input_ids[0, -2:].tolist()\n # decoding_qids = decoding_kwargs['input_id_list'][0][-2:]\n min_input_size = 0\n min_output_size = max(decoding_length // 2, 1)\n\n if decoding_mode in ('hier', 'par', 'one'):\n decoding_mode = decoding_mode + '_mix'\n fmt, mode = decoding_mode.split('_')\n method_name = fmt + '_get'\n\n decoding_ids, decoding_masks, sizes = getattr(self.lookahead_cache, method_name)(decoding_qids,\n decoding_length=decoding_length,\n branch_length=update_branch_length,\n min_input_size=min_input_size,\n min_output_size=min_output_size,\n mode=mode,\n idx=0)\n\n decoding_input_ids = torch.tensor([decoding_ids], dtype=torch.long, device=input_ids.device)\n prefix_length = input_ids.size(-1) - 1\n fresh_length = len(decoding_ids)\n ppl = prefix_length + fresh_length\n assert ppl <= attention_mask.size(2), \\\n f'{max_length=} {update_branch_length=} {prefix_length=} {fresh_length=} {attention_mask.shape=}'\n prefix_mask_tensor = attention_mask[:, :, prefix_length:ppl, :prefix_length]\n decoding_mask_tensor = torch.from_numpy(decoding_masks[None, None]).to(\n dtype=attention_mask.dtype, device=attention_mask.device)\n decoding_attention_mask = torch.cat([prefix_mask_tensor, decoding_mask_tensor], dim=3)\n\n decoding_kwargs.update({'decoding_qids': decoding_qids,\n 'decoding_ids': decoding_ids,\n 'decoding_masks': decoding_masks,\n 'sizes': sizes,\n })\n model_inputs = {'decoding_kwargs': decoding_kwargs}\n\n model_inputs.update(\n {\n \"input_ids\": 
decoding_input_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": decoding_attention_mask\n }\n )\n if position_ids is not None:\n indices = torch.sum(decoding_attention_mask, dim=3).squeeze(1)[0]\n model_inputs[\"position_ids\"] = self._get_position_ids(position_ids, indices=indices, encoding=False)\n\n return model_inputs\n\n def _get_position_ids(self, full_position_ids, indices=None, length=None, encoding=True):\n if encoding:\n return full_position_ids[..., :length]\n else:\n return full_position_ids[..., indices]\n\n def _lookahead_update_model_kwargs_for_generation(\n self,\n outputs: ModelOutput,\n model_kwargs: Dict[str, Any],\n is_encoder_decoder: bool = False,\n standardize_cache_format: bool = False,\n logits_processor: Optional[LogitsProcessorList] = None,\n input_ids: Optional[torch.Tensor] = None,\n ) -> Dict[str, Any]:\n # update past_key_values\n model_kwargs[\"past_key_values\"] = self._extract_past_from_model_output(\n outputs, standardize_cache_format=standardize_cache_format\n )\n\n decoding_kwargs = model_kwargs['decoding_kwargs']\n decoding_ids = decoding_kwargs.get('decoding_ids', [])\n if len(decoding_ids) <= 1:\n next_token_logits = outputs.logits[:, -1:, :]\n # pre-process distribution\n # next_tokens_scores = logits_processor(input_ids, next_token_logits)\n bs, nt, nv = next_token_logits.shape\n next_tokens_scores = logits_processor(input_ids, next_token_logits.squeeze(1)).unsqueeze(1)\n\n if decoding_kwargs.get('do_sample', False):\n probs = nn.functional.softmax(next_tokens_scores, dim=-1)\n next_tokens = torch.multinomial(probs.view(bs * nt, nv), num_samples=1).view(bs, nt)\n else:\n next_tokens = torch.argmax(next_tokens_scores, dim=-1, keepdim=False).long()\n model_kwargs['next_tokens'] = next_tokens\n model_kwargs['next_tokens_scores'] = next_tokens_scores\n next_token_list = next_tokens.tolist()\n model_kwargs['next_token_list'] = next_token_list\n decoding_kwargs['input_id_list'][0].extend(next_token_list[0])\n decoding_kwargs['dls'].append(1)\n decoding_kwargs['edls'].append(1)\n if decoding_kwargs.get('debug_lookahead', False):\n decoding_qids = decoding_kwargs.get('decoding_qids', [])\n print(f'size:0 query:{decoding_qids} next_token:{next_token_list[0]}')\n else:\n # TODO: accurate logit_processor\n # next_tokens_scores = logits_processor(input_ids, outputs.logits)\n bs, nt, nv = outputs.logits.shape\n next_tokens_scores = logits_processor(input_ids.repeat(1, nt).view(bs * nt, -1),\n outputs.logits.view(bs * nt, -1)).view(bs, nt, -1)\n\n if decoding_kwargs.get('do_sample', False):\n probs = nn.functional.softmax(next_tokens_scores, dim=-1)\n bs, nt, nv = probs.shape\n next_tokens = torch.multinomial(probs.view(bs * nt, nv), num_samples=1).view(bs, nt)\n else:\n next_tokens = torch.argmax(next_tokens_scores, dim=-1, keepdim=False).long()\n\n next_token_list = next_tokens.tolist()[0]\n decoding_ids = decoding_kwargs['decoding_ids'][1:]\n decoding_mask = decoding_kwargs['decoding_masks']\n sizes = decoding_kwargs['sizes']\n\n max_match_index = 0\n max_match_count = 0\n max_decoding_ids_slice = None\n max_next_token_slice = None\n \n for i in range(len(decoding_ids)):\n mask_indices = np.nonzero(decoding_mask[i + 1, 1:])[0]\n decoding_ids_slice = [decoding_ids[j] for j in mask_indices] \n next_token_slice = [next_token_list[0]] + [next_token_list[j + 1] for j in mask_indices]\n \n c = len(decoding_ids_slice)\n for j, p in enumerate(decoding_ids_slice):\n if next_token_slice[j] != p:\n c = j\n 
break\n if c > max_match_count:\n max_match_count = c\n max_match_index = i\n if c >= max_match_count:\n max_decoding_ids_slice = decoding_ids_slice\n max_next_token_slice = next_token_slice\n # if decoding_kwargs['eos'] in decoding_ids:\n # max_match_count = 0\n\n prefix_plus_count = input_ids.size(-1)\n match_idx = np.nonzero(decoding_mask[max_match_index + 1, 1:])[0][:max_match_count]\n if len(decoding_ids) != max_match_count:\n past = model_kwargs[\"past_key_values\"]\n device = past[0][0].device\n kv_idx = torch.tensor(match_idx + prefix_plus_count, dtype=torch.long, device=device)\n model_kwargs[\"past_key_values\"] = self._update_cache(past,\n kv_idx,\n prefix_and_next_count=prefix_plus_count,\n max_match_count=max_match_count,\n max_match_index=max_match_index)\n\n next_token_list = [next_token_list[0:1] + [next_token_list[x + 1] for x in match_idx]]\n next_tokens = torch.tensor(next_token_list, dtype=torch.long, device=input_ids.device)\n model_kwargs['next_tokens'] = next_tokens\n model_kwargs['next_token_list'] = next_token_list\n decoding_kwargs['input_id_list'][0].extend(next_token_list[0])\n decoding_kwargs['dls'].append(len(decoding_ids))\n decoding_kwargs['edls'].append(max_match_count + 1)\n if decoding_kwargs.get('debug_lookahead', False):\n lengths = np.sum(decoding_mask, axis=1) - 1\n l = np.concatenate([lengths[:-1][(lengths[1:] - lengths[:-1]) <= 0], lengths[-1:]], axis=0)\n ls = ','.join(l.astype(np.str_))\n decoding_qids = decoding_kwargs['decoding_qids']\n size_str = ','.join([str(x) for x in sizes])\n print(\n f'decoding_length:{len(decoding_ids)+1} accept_length:{max_match_count+1} '\n f'query:{decoding_qids} source:{size_str} lengths:{ls} index:{max_match_index} '\n f'branch_token:{max_decoding_ids_slice} next_token:{max_next_token_slice}')\n\n return model_kwargs\n\n def _update_cache(self, past_key_values, kv_idx, prefix_and_next_count=None, max_match_count=None,\n max_match_index=None):\n update_past_key_values = []\n for k, v in past_key_values:\n if max_match_index + 1 == max_match_count:\n k = k[:, :, :prefix_and_next_count + max_match_count]\n v = v[:, :, :prefix_and_next_count + max_match_count]\n else:\n k = torch.concat([k[:, :, :prefix_and_next_count], k[:, :, kv_idx]], 2)\n v = torch.concat([v[:, :, :prefix_and_next_count], v[:, :, kv_idx]], 2)\n update_past_key_values.append((k, v))\n return tuple(update_past_key_values)\n\n def lookahead_generation(\n self,\n input_ids: torch.LongTensor,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n max_length: Optional[int] = None,\n pad_token_id: Optional[int] = None,\n eos_token_id: Optional[Union[int, List[int]]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n output_scores: Optional[bool] = None,\n return_dict_in_generate: Optional[bool] = None,\n synced_gpus: bool = False,\n streamer: Optional[\"BaseStreamer\"] = None,\n **model_kwargs,\n ) -> Union[GreedySearchOutput, torch.LongTensor]:\n r\"\"\"\n Generates sequences of token ids for models with a language modeling head using **greedy decoding** and can be\n used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.\n\n <Tip warning={true}>\n\n In most cases, you do not need to call [`~generation.GenerationMixin.greedy_search`] directly. Use generate()\n instead. 
For an overview of generation strategies and code examples, check the [following\n guide](../generation_strategies).\n\n </Tip>\n\n\n Parameters:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n The sequence used as a prompt for the generation.\n logits_processor (`LogitsProcessorList`, *optional*):\n An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]\n used to modify the prediction scores of the language modeling head applied at each generation step.\n stopping_criteria (`StoppingCriteriaList`, *optional*):\n An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]\n used to tell if the generation loop should stop.\n\n max_length (`int`, *optional*, defaults to 20):\n **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated\n tokens. The maximum length of the sequence to be generated.\n pad_token_id (`int`, *optional*):\n The id of the *padding* token.\n eos_token_id (`Union[int, List[int]]`, *optional*):\n The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.\n output_attentions (`bool`, *optional*, defaults to `False`):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more details.\n output_hidden_states (`bool`, *optional*, defaults to `False`):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more details.\n output_scores (`bool`, *optional*, defaults to `False`):\n Whether or not to return the prediction scores. See `scores` under returned tensors for more details.\n return_dict_in_generate (`bool`, *optional*, defaults to `False`):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n synced_gpus (`bool`, *optional*, defaults to `False`):\n Whether to continue running the while loop until max_length (needed for ZeRO stage 3)\n streamer (`BaseStreamer`, *optional*):\n Streamer object that will be used to stream the generated sequences. Generated tokens are passed\n through `streamer.put(token_ids)` and the streamer is responsible for any further processing.\n model_kwargs:\n Additional model specific keyword arguments will be forwarded to the `forward` function of the model.\n If model is an encoder-decoder model the kwargs should include `encoder_outputs`.\n\n Return:\n [`~generation.GreedySearchDecoderOnlyOutput`], [`~generation.GreedySearchEncoderDecoderOutput`] or\n `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a\n [`~generation.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and\n `return_dict_in_generate=True` or a [`~generation.GreedySearchEncoderDecoderOutput`] if\n `model.config.is_encoder_decoder=True`.\n\n Examples:\n\n ```python\n >>> from transformers import (\n ... AutoTokenizer,\n ... AutoModelForCausalLM,\n ... LogitsProcessorList,\n ... MinLengthLogitsProcessor,\n ... StoppingCriteriaList,\n ... MaxLengthCriteria,\n ... 
)\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n >>> model = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n\n >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token\n >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id\n\n >>> input_prompt = \"It might be possible to\"\n >>> input_ids = tokenizer(input_prompt, return_tensors=\"pt\").input_ids\n\n >>> # instantiate logits processors\n >>> logits_processor = LogitsProcessorList(\n ... [\n ... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id),\n ... ]\n ... )\n >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])\n\n >>> outputs = model.greedy_search(\n ... input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria\n ... )\n\n >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)\n [\"It might be possible to get a better understanding of the nature of the problem, but it's not\"]\n ```\"\"\"\n # init values\n\n if not hasattr(self, 'lookahead_cache'):\n self.lookahead_cache = LookaheadCache()\n\n logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n if max_length is not None:\n warnings.warn(\n \"`max_length` is deprecated in this function, use\"\n \" `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.\",\n UserWarning,\n )\n stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)\n pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id\n eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id\n if isinstance(eos_token_id, int):\n eos_token_id = [eos_token_id]\n eos_token_id_tensor = torch.tensor(eos_token_id, device=input_ids.device) if eos_token_id is not None else None\n output_scores = output_scores if output_scores is not None else self.generation_config.output_scores\n output_attentions = (\n output_attentions if output_attentions is not None else self.generation_config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states\n )\n return_dict_in_generate = (\n return_dict_in_generate\n if return_dict_in_generate is not None\n else self.generation_config.return_dict_in_generate\n )\n\n # init attention / hidden states / scores tuples\n scores = () if (return_dict_in_generate and output_scores) else None\n decoder_attentions = () if (return_dict_in_generate and output_attentions) else None\n cross_attentions = () if (return_dict_in_generate and output_attentions) else None\n decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None\n\n # if model is an encoder-decoder, retrieve encoder attention weights and hidden states\n if return_dict_in_generate and self.config.is_encoder_decoder:\n encoder_attentions = model_kwargs[\"encoder_outputs\"].get(\"attentions\") if output_attentions else None\n encoder_hidden_states = (\n model_kwargs[\"encoder_outputs\"].get(\"hidden_states\") if output_hidden_states else None\n )\n\n decoding_kwargs = model_kwargs['decoding_kwargs']\n decoding_kwargs.update({\n 'eos': eos_token_id[0] if eos_token_id is not None else 2,\n 'edls': [],\n 'dls': [],\n 'fts': []\n })\n\n decoding_length = decoding_kwargs.get('decoding_length', 64)\n 
stop_max_length = stopping_criteria.max_length\n decoding_max_length = stop_max_length + decoding_length + 1\n attention_mask = model_kwargs.get('attention_mask', None)\n input_device = input_ids.device\n if attention_mask is None:\n bs = input_ids.size(0)\n full_attention_mask = torch.tril(\n torch.ones((bs, 1, decoding_max_length, decoding_max_length), dtype=torch.long, device=input_device),\n 0)\n elif len(attention_mask.shape) == 2:\n # from [bs, src_len] to [bs,1,max_len,max_len]\n bs, src_len = attention_mask.shape\n pad_len = decoding_max_length - src_len\n attention_mask = attention_mask.long()\n if pad_len > 0:\n pad_mask = torch.ones((bs, pad_len), dtype=torch.long, device=attention_mask.device)\n attention_mask = torch.cat([attention_mask, pad_mask], 1)\n full_attention_mask = torch.tril(attention_mask[:, None, None].expand(-1, -1, decoding_max_length, -1), 0)\n elif len(attention_mask.shape) == 4:\n bs, _, src_len, tgt_len = attention_mask.shape\n attention_mask = attention_mask.long()\n if src_len < decoding_max_length or tgt_len < decoding_max_length:\n full_attention_mask = torch.tril(\n torch.ones((bs, 1, decoding_max_length, decoding_max_length), dtype=torch.long,\n device=input_device),\n 0)\n full_attention_mask[:, :, :src_len, :tgt_len] = attention_mask\n else:\n full_attention_mask = attention_mask\n else:\n raise ValueError(f'unsupport attention_mask.shape:{attention_mask.shape}')\n model_kwargs['attention_mask'] = full_attention_mask\n decoding_kwargs['max_length'] = stop_max_length\n decoding_kwargs['decoding_max_length'] = decoding_max_length\n\n # keep track of which sequences are already finished\n unfinished_sequences = torch.ones(input_ids.shape[0], dtype=torch.long, device=input_ids.device)\n\n assert input_ids.size(0) == 1\n input_id_list = input_ids[0].tolist()\n decoding_kwargs['input_id_list'] = [input_id_list]\n branch_length = decoding_kwargs.get('branch_length', 12)\n self.lookahead_cache.put(input_id_list[1:], branch_length=branch_length + 1, mode='input', idx=0)\n ts = time.time()\n\n this_peer_finished = False # used by synced_gpus only\n while True:\n if synced_gpus:\n # Under synced_gpus the `forward` call must continue until all gpus complete their sequence.\n # The following logic allows an early break if all peers finished generating their sequence\n this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)\n # send 0.0 if we finished, 1.0 otherwise\n dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)\n # did all peers finish? 
the reduced sum will be 0.0 then\n if this_peer_finished_flag.item() == 0.0:\n break\n\n # prepare model inputs\n model_inputs = self.lookahead_prepare_inputs_for_generation(input_ids, **model_kwargs)\n decoding_kwargs = model_inputs.pop('decoding_kwargs', {})\n\n # forward pass to get next token\n outputs = self(\n **model_inputs,\n return_dict=True,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n if synced_gpus and this_peer_finished:\n continue # don't waste resources running the code we don't need\n\n model_kwargs['decoding_kwargs'] = decoding_kwargs\n model_kwargs = self._lookahead_update_model_kwargs_for_generation(\n outputs,\n model_kwargs,\n is_encoder_decoder=self.config.is_encoder_decoder,\n input_ids=input_ids,\n logits_processor=logits_processor\n )\n\n next_tokens = model_kwargs['next_tokens']\n next_tokens_scores = model_kwargs['next_tokens_scores']\n next_token_list = model_kwargs['next_token_list']\n\n # finished sentences should have their next token be a padding token\n if eos_token_id is not None:\n if pad_token_id is None:\n raise ValueError(\"If `eos_token_id` is defined, make sure that `pad_token_id` is defined.\")\n next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)\n\n # update generated ids, model inputs, and length for next step\n input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n if streamer is not None:\n streamer.put(next_token_list)\n\n self.lookahead_cache.stream_put(next_token_list[0], branch_length=branch_length + 1, final=False,\n mode='output', idx=0)\n\n # Store scores, attentions and hidden_states when required\n if return_dict_in_generate:\n if output_scores:\n scores += (next_tokens_scores,)\n if output_attentions:\n decoder_attentions += (\n (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)\n )\n if self.config.is_encoder_decoder:\n cross_attentions += (outputs.cross_attentions,)\n\n if output_hidden_states:\n decoder_hidden_states += (\n (outputs.decoder_hidden_states,)\n if self.config.is_encoder_decoder\n else (outputs.hidden_states,)\n )\n\n # if eos_token was found in one sentence, set sentence to finished\n if eos_token_id_tensor is not None:\n # unfinished_sequences = unfinished_sequences.mul(\n # next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)\n # )\n unfinished_sequences = unfinished_sequences.mul(\n next_tokens[:, :, None].ne(eos_token_id_tensor).prod(dim=2).prod(dim=1))\n\n # stop when each sentence is finished\n if unfinished_sequences.max() == 0:\n this_peer_finished = True\n\n # stop if we exceed the maximum length\n if stopping_criteria(input_ids, scores):\n this_peer_finished = True\n\n te = time.time()\n model_kwargs['decoding_kwargs']['fts'].append(te - ts)\n ts = te\n if this_peer_finished and not synced_gpus:\n self.lookahead_cache.stream_put([], branch_length=branch_length + 1, final=True,\n mode='output', idx=0)\n break\n\n if streamer is not None:\n streamer.end()\n\n if return_dict_in_generate:\n if self.config.is_encoder_decoder:\n return GreedySearchEncoderDecoderOutput(\n sequences=input_ids,\n scores=scores,\n encoder_attentions=encoder_attentions,\n encoder_hidden_states=encoder_hidden_states,\n decoder_attentions=decoder_attentions,\n cross_attentions=cross_attentions,\n decoder_hidden_states=decoder_hidden_states,\n )\n else:\n kwargs = {'dls': model_kwargs['decoding_kwargs']['dls'],\n 'edls': model_kwargs['decoding_kwargs']['edls'],\n 
'fts': model_kwargs['decoding_kwargs']['fts']}\n return LookaheadDecoderOnlyOutput(\n sequences=input_ids,\n scores=scores,\n attentions=decoder_attentions,\n hidden_states=decoder_hidden_states,\n kwargs=kwargs\n )\n else:\n return input_ids\n\n def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):\n \"\"\"Validates model kwargs for generation. Generate argument typos will also be caught here.\"\"\"\n # Excludes arguments that are handled before calling any model function\n if self.config.is_encoder_decoder:\n for key in [\"decoder_input_ids\"]:\n model_kwargs.pop(key, None)\n\n unused_model_args = []\n model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)\n # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If\n # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)\n if \"kwargs\" in model_args or \"model_kwargs\" in model_args:\n model_args |= set(inspect.signature(self.forward).parameters)\n\n # Encoder-Decoder models may also need Encoder arguments from `model_kwargs`\n if self.config.is_encoder_decoder:\n base_model = getattr(self, self.base_model_prefix, None)\n\n # allow encoder kwargs\n encoder = getattr(self, \"encoder\", None)\n # `MusicgenForConditionalGeneration` has `text_encoder` and `audio_encoder`.\n # Also, it has `base_model_prefix = \"encoder_decoder\"` but there is no `self.encoder_decoder`\n # TODO: A better way to handle this.\n if encoder is None and base_model is not None:\n encoder = getattr(base_model, \"encoder\", None)\n\n if encoder is not None:\n encoder_model_args = set(inspect.signature(encoder.forward).parameters)\n model_args |= encoder_model_args\n\n # allow decoder kwargs\n decoder = getattr(self, \"decoder\", None)\n if decoder is None and base_model is not None:\n decoder = getattr(base_model, \"decoder\", None)\n\n if decoder is not None:\n decoder_model_args = set(inspect.signature(decoder.forward).parameters)\n model_args |= {f\"decoder_{x}\" for x in decoder_model_args}\n\n decoding_kwargs = ['decoding_kwargs','stop_words_ids']\n for key, value in model_kwargs.items():\n if value is not None and key not in model_args and key not in decoding_kwargs:\n unused_model_args.append(key)\n\n if unused_model_args:\n raise ValueError(\n f\"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the\"\n \" generate arguments will also show up in this list)\"\n )" }, { "identifier": "BaichuanConfig", "path": "pia/lookahead/models/baichuan/configuration_baichuan.py", "snippet": "class BaichuanConfig(PretrainedConfig):\n model_type = \"baichuan\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=125696,\n hidden_size=4096,\n intermediate_size=11008,\n num_hidden_layers=32,\n num_attention_heads=32,\n hidden_act=\"silu\",\n max_position_embeddings=4096,\n initializer_range=0.02,\n rms_norm_eps=1e-6,\n use_cache=True,\n pad_token_id=0,\n bos_token_id=1,\n eos_token_id=2,\n tie_word_embeddings=False,\n z_loss_weight=0,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.rms_norm_eps = rms_norm_eps\n self.use_cache = use_cache\n self.z_loss_weight = 
z_loss_weight\n super().__init__(\n pad_token_id=pad_token_id,\n bos_token_id=bos_token_id,\n eos_token_id=eos_token_id,\n tie_word_embeddings=tie_word_embeddings,\n **kwargs,\n )" }, { "identifier": "build_chat_input", "path": "pia/lookahead/models/baichuan/generation_utils.py", "snippet": "def build_chat_input(model, tokenizer, messages: List[dict], max_new_tokens: int = 0):\n def _parse_messages(messages, split_role=\"user\"):\n system, rounds = \"\", []\n round = []\n for i, message in enumerate(messages):\n if message[\"role\"] == \"system\":\n assert i == 0\n system = message[\"content\"]\n continue\n if message[\"role\"] == split_role and round:\n rounds.append(round)\n round = []\n round.append(message)\n if round:\n rounds.append(round)\n return system, rounds\n\n max_new_tokens = max_new_tokens or model.generation_config.max_new_tokens\n max_input_tokens = model.config.model_max_length - max_new_tokens\n system, rounds = _parse_messages(messages, split_role=\"user\")\n system_tokens = tokenizer.encode(system)\n max_history_tokens = max_input_tokens - len(system_tokens)\n\n history_tokens = []\n for round in rounds[::-1]:\n round_tokens = []\n for message in round:\n if message[\"role\"] == \"user\":\n round_tokens.append(model.generation_config.user_token_id)\n else:\n round_tokens.append(model.generation_config.assistant_token_id)\n round_tokens.extend(tokenizer.encode(message[\"content\"]))\n if len(history_tokens) == 0 or len(history_tokens) + len(round_tokens) <= max_history_tokens:\n history_tokens = round_tokens + history_tokens # concat left\n if len(history_tokens) < max_history_tokens:\n continue\n break\n\n input_tokens = system_tokens + history_tokens\n if messages[-1][\"role\"] != \"assistant\":\n input_tokens.append(model.generation_config.assistant_token_id)\n input_tokens = input_tokens[-max_input_tokens:] # truncate left\n return torch.LongTensor([input_tokens]).to(model.device)" }, { "identifier": "TextIterStreamer", "path": "pia/lookahead/models/baichuan/generation_utils.py", "snippet": "class TextIterStreamer:\n def __init__(self, tokenizer, skip_prompt=False, skip_special_tokens=False):\n self.tokenizer = tokenizer\n self.skip_prompt = skip_prompt\n self.skip_special_tokens = skip_special_tokens\n self.tokens = []\n self.text_queue = Queue()\n self.next_tokens_are_prompt = True\n\n def put(self, value):\n if self.skip_prompt and self.next_tokens_are_prompt:\n self.next_tokens_are_prompt = False\n else:\n if len(value.shape) > 1:\n value = value[0]\n self.tokens.extend(value.tolist())\n self.text_queue.put(\n self.tokenizer.decode(self.tokens, skip_special_tokens=self.skip_special_tokens))\n\n def end(self):\n self.text_queue.put(None)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n value = self.text_queue.get()\n if value is None:\n raise StopIteration()\n else:\n return value" } ]
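The context above ends with the build_chat_input and TextIterStreamer helpers. A minimal sketch (my own, not taken from the record) of how the two can be wired together for streaming chat, assuming `model` is an already-loaded Baichuan causal LM and `tokenizer` its tokenizer:

from threading import Thread

from pia.lookahead.models.baichuan.generation_utils import TextIterStreamer, build_chat_input


def stream_chat(model, tokenizer, messages, max_new_tokens=64):
    # Encode system/user/assistant turns with the model's special role tokens.
    input_ids = build_chat_input(model, tokenizer, messages, max_new_tokens)
    # The streamer collects token ids emitted by generate() and yields decoded text.
    streamer = TextIterStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    Thread(target=model.generate,
           kwargs=dict(inputs=input_ids, max_new_tokens=max_new_tokens, streamer=streamer)).start()
    # Iteration stops once generate() calls streamer.end().
    for partial_text in streamer:
        yield partial_text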
import math import os import torch import torch.utils.checkpoint from contextlib import contextmanager from threading import Thread from typing import List, Optional, Tuple, Union from torch import nn from torch.nn import CrossEntropyLoss from torch.nn import functional as F from transformers import PretrainedConfig from transformers.activations import ACT2FN from transformers.generation.utils import GenerationConfig from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from transformers.utils import logging, ContextManagers from pia.lookahead.common.pretrained_model import LookaheadPreTrainedModel from pia.lookahead.models.baichuan.configuration_baichuan import BaichuanConfig from pia.lookahead.models.baichuan.generation_utils import build_chat_input, TextIterStreamer from xformers import ops as xops from .quantizer import quantize_offline, init_model_weight_int4 from .quantizer import init_model_weight_int4 from accelerate import init_empty_weights, dispatch_model, infer_auto_device_map from accelerate.utils import CustomDtype from accelerate.utils import get_balanced_memory from .quantizer import quantize_online
17,573
if device_map is not None: dispatch_model(model, device_map=device_map) return model return super(BaichuanForCausalLM, cls).from_pretrained(pretrained_model_name_or_path, *model_args, config=config, cache_dir=cache_dir, ignore_mismatched_sizes=ignore_mismatched_sizes, force_download=force_download, local_files_only=local_files_only, token=token, revision=revision, use_safetensors=use_safetensors, **kwargs) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) softmax_normalizer = shift_logits.max(-1).values ** 2 z_loss = self.config.z_loss_weight * softmax_normalizer.mean() # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) + z_loss if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: 
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n        return reordered_past\n\n    def quantize(self, bits: int):\n        try:\n            from .quantizer import quantize_online\n        except ImportError:\n            raise ImportError(f"Needs QLinear to run quantize.")\n        return quantize_online(self, bits)\n\n    def chat(self, tokenizer, messages: List[dict], stream=False,\n             generation_config: Optional[GenerationConfig] = None):\n        generation_config = generation_config or self.generation_config
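The cropped code above derives position_ids from the attention mask inside prepare_inputs_for_generation. A toy run of that exact recipe (my own illustration, not part of the record) shows how left-padded rows still start their real tokens at position 0:

import torch

attention_mask = torch.tensor([[1, 1, 1, 1],
                               [0, 0, 1, 1]])          # second row is left-padded
position_ids = attention_mask.long().cumsum(-1) - 1    # [[0, 1, 2, 3], [-1, -1, 0, 1]]
position_ids.masked_fill_(attention_mask == 0, 1)      # padded slots get a harmless dummy id
print(position_ids)                                    # tensor([[0, 1, 2, 3], [1, 1, 0, 1]])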
# Copyright 2023 Baichuan Inc. All Rights Reserved. # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) try: except ImportError: xops = None logger.warning( "Xformers is not installed correctly. If you want to use memory_efficient_attention to accelerate training use the following command to install Xformers\npip install xformers." ) # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" if len(mask.size()) == 3: bsz, src_len, _ = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, :, :].expand(bsz, 1, tgt_len, src_len).to(dtype) else: bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class RMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ RMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states class RotaryEmbedding(torch.nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) self.max_seq_len_cached = max_position_embeddings t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1) self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32) self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case. 
if seq_len > self.max_seq_len_cached: self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1) self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32).to(x.device) self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32).to(x.device) elif self.cos_cached.device != x.device: self.cos_cached = self.cos_cached.to(x.device) self.sin_cached = self.sin_cached.to(x.device) return ( self.cos_cached[:, :, :seq_len, ...], self.sin_cached[:, :, :seq_len, ...], ) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos_, sin_, position_ids): cos = cos_.squeeze(1).squeeze(0) # [seq_len, dim] sin = sin_.squeeze(1).squeeze(0) # [seq_len, dim] cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] q_embed = (q.float() * cos) + (rotate_half(q.float()) * sin) k_embed = (k.float() * cos) + (rotate_half(k.float()) * sin) return q_embed.to(q.dtype), k_embed.to(k.dtype) class MLP(nn.Module): def __init__( self, hidden_size: int, intermediate_size: int, hidden_act: str, ): super().__init__() self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.act_fn = ACT2FN[hidden_act] def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) class Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: BaichuanConfig): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.max_position_embeddings = config.max_position_embeddings if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." 
) self.W_pack = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.rotary_emb = RotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() proj = self.W_pack(hidden_states) proj = proj.unflatten(-1, (3, self.hidden_size)).unsqueeze(0).transpose(0, -2).squeeze(-2) query_states = proj[0].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = proj[1].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = proj[2].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) # [bsz, nh, t, hd] if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None if xops is not None and self.training: attn_weights = None query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_output = xops.memory_efficient_attention( query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask() ) else: with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True): attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value class DecoderLayer(nn.Module): def __init__(self, config: BaichuanConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Attention(config=config) self.mlp = MLP( hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, ) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, 
position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class BaichuanPreTrainedModel(LookaheadPreTrainedModel): config_class = BaichuanConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["DecoderLayer"] _keys_to_ignore_on_load_unexpected = [r"decoder\.version"] def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, BaichuanModel): module.gradient_checkpointing = value class BaichuanModel(BaichuanPreTrainedModel): def __init__(self, config: BaichuanConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = 
return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) # embed positions # Note: adapt for lookahead if attention_mask is not None and len(attention_mask.shape) == 4: # lookahead # attention_mask: [bs, 1, src_len, tgt_len] position_ids = torch.sum(attention_mask, dim=-1).squeeze(1) - 1 attention_mask = (1.0-attention_mask.to(inputs_embeds.dtype)) * torch.finfo(inputs_embeds.dtype).min else: # non-lookahead if attention_mask is None: attention_mask = torch.ones( (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device ) attention_mask = self._prepare_decoder_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length ) if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() hidden_states = inputs_embeds if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, output_attentions, None) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, attention_mask, position_ids, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) class NormHead(nn.Module): def __init__(self, hidden_size, vocab_size, bias=False): super().__init__() self.weight = nn.Parameter(torch.empty((vocab_size, hidden_size))) nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) self.first_flag = True def forward(self, hidden_states): if self.training: norm_weight = nn.functional.normalize(self.weight) elif self.first_flag: self.first_flag = False self.weight = nn.Parameter(nn.functional.normalize(self.weight)) norm_weight = self.weight else: norm_weight = self.weight return nn.functional.linear(hidden_states, norm_weight) _init_weights = True @contextmanager def no_init_weights(_enable=True): global _init_weights old_init_weights = _init_weights if _enable: _init_weights = False try: yield finally: _init_weights = old_init_weights class BaichuanForCausalLM(BaichuanPreTrainedModel): def __init__(self, config, *model_args, **model_kwargs): super().__init__(config) self.model = BaichuanModel(config) self.lm_head = NormHead(config.hidden_size, config.vocab_size, bias=False) if hasattr(config, "quantization_config") and config.quantization_config['load_in_4bit']: try: except ImportError: raise ImportError(f"Needs QLinear to run quantize.") quantize_offline(self, 4) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, ignore_mismatched_sizes: bool = False, force_download: bool = 
False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", use_safetensors: bool = None, **kwargs, ): # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=False, proxies=None, local_files_only=local_files_only, token=token, revision=revision, subfolder="", _from_auto=False, _from_pipeline=None, **kwargs, ) else: model_kwargs = kwargs if hasattr(config, "quantization_config") and config.quantization_config['load_in_4bit']: try: except ImportError: raise ImportError(f"Needs import model weight init func to run quantize.") # Instantiate model. init_contexts = [no_init_weights(_enable=True)] init_contexts.append(init_empty_weights()) with ContextManagers(init_contexts): model = cls(config) model_file = os.path.join(pretrained_model_name_or_path, 'pytorch_model.bin') state_dict = torch.load(model_file, map_location="cpu") model.is_quantized = True device_map = kwargs.pop("device_map", None) torch_dtype = kwargs.pop("torch_dtype", None) kwargs = {"no_split_module_classes": model._no_split_modules} target_dtype = CustomDtype.INT4 max_memory = get_balanced_memory( model, dtype=target_dtype, low_zero=(device_map == "balanced_low_0"), max_memory=None, **kwargs, ) kwargs["max_memory"] = max_memory device_map = infer_auto_device_map(model, dtype=target_dtype, **kwargs) model = init_model_weight_int4(config, model, state_dict) # Set model in evaluation mode to deactivate DropOut modules by default model.eval() # If it is a model with generation capabilities, attempt to load the generation config if model.can_generate(): try: model.generation_config = GenerationConfig.from_pretrained( pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=False, proxies=None, local_files_only=local_files_only, token=token, revision=revision, subfolder="", _from_auto=False, _from_pipeline=None, **kwargs, ) except (OSError, TypeError): logger.info( "Generation config file not found, using a generation config created from the model config." 
) pass if device_map is not None: dispatch_model(model, device_map=device_map) return model return super(BaichuanForCausalLM, cls).from_pretrained(pretrained_model_name_or_path, *model_args, config=config, cache_dir=cache_dir, ignore_mismatched_sizes=ignore_mismatched_sizes, force_download=force_download, local_files_only=local_files_only, token=token, revision=revision, use_safetensors=use_safetensors, **kwargs) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) softmax_normalizer = shift_logits.max(-1).values ** 2 z_loss = self.config.z_loss_weight * softmax_normalizer.mean() # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) + z_loss if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: 
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n        return reordered_past\n\n    def quantize(self, bits: int):\n        try:\n            from .quantizer import quantize_online\n        except ImportError:\n            raise ImportError(f"Needs QLinear to run quantize.")\n        return quantize_online(self, bits)\n\n    def chat(self, tokenizer, messages: List[dict], stream=False,\n             generation_config: Optional[GenerationConfig] = None):\n        generation_config = generation_config or self.generation_config
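The file above defines RotaryEmbedding, rotate_half, and apply_rotary_pos_emb. A small shape check (my own sketch, assuming those three definitions from the file are already in scope) of how they are applied to query/key tensors shaped [batch, heads, seq_len, head_dim]:

import torch

bs, n_heads, seq_len, head_dim = 2, 4, 6, 8
q = torch.randn(bs, n_heads, seq_len, head_dim)
k = torch.randn(bs, n_heads, seq_len, head_dim)

rotary = RotaryEmbedding(head_dim, max_position_embeddings=32)
cos, sin = rotary(q, seq_len=seq_len)                      # each [1, 1, seq_len, head_dim]
position_ids = torch.arange(seq_len).unsqueeze(0).expand(bs, -1)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
assert q_rot.shape == q.shape and k_rot.shape == k.shape   # rotation keeps shapes and dtypes
# Each position is rotated by an angle proportional to its index, so the q.k dot
# products after rotation depend only on relative offsets between positions.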
input_ids = build_chat_input(self, tokenizer, messages, generation_config.max_new_tokens)
2
2023-12-19 13:11:38+00:00
24k
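The lookahead decoding snippet at the start of this record inflates a 2-D padding mask into a [bs, 1, max_len, max_len] causal mask covering the full decoding budget. A toy reproduction of its 2-D branch (my own, with a made-up decoding_max_length of 6) shows the resulting layout:

import torch

decoding_max_length = 6
attention_mask = torch.tensor([[0, 1, 1]])             # one left-padded prompt, src_len = 3
bs, src_len = attention_mask.shape
pad_len = decoding_max_length - src_len
pad_mask = torch.ones((bs, pad_len), dtype=torch.long)
attention_mask = torch.cat([attention_mask.long(), pad_mask], 1)   # extend mask over future slots
full_attention_mask = torch.tril(
    attention_mask[:, None, None].expand(-1, -1, decoding_max_length, -1), 0)
print(full_attention_mask[0, 0])
# tensor([[0, 0, 0, 0, 0, 0],
#         [0, 1, 0, 0, 0, 0],
#         [0, 1, 1, 0, 0, 0],
#         [0, 1, 1, 1, 0, 0],
#         [0, 1, 1, 1, 1, 0],
#         [0, 1, 1, 1, 1, 1]])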
MingtaoGuo/AnimateAnyone_unofficial
aldm/aldm.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "zero_module", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "SpatialTransformer", "path": "ldm/modules/attention.py", "snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w 
-> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "SpatialTransformerPlus", "path": "ldm/modules/attention.py", "snippet": "class SpatialTransformerPlus(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True, use_temporal_attention=False):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n self.spatial_attn = SpatialSelfAttention(in_channels)\n if use_temporal_attention:\n self.temporal_attn = TemporalTransformer(in_channels)\n\n def forward(self, x, context=None, ref=None):\n x = torch.cat([x, ref], dim=-1)\n x = self.spatial_attn(x)\n x = x[..., :ref.shape[-1]]\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "ResBlock", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "def convert_module_to_f16(x):\ndef convert_module_to_f32(x):\n def __init__(\n self,\n spacial_dim: int,\n embed_dim: int,\n num_heads_channels: int,\n output_dim: int = None,\n ):\n def forward(self, x):\n def forward(self, x, emb):\n def forward(self, x, emb, context=None):\n def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):\n def forward(self, x):\n def __init__(self, channels, out_channels=None, ks=5):\n def forward(self,x):\n def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):\n def forward(self, x):\n def __init__(\n self,\n channels,\n emb_channels,\n dropout,\n out_channels=None,\n use_conv=False,\n use_scale_shift_norm=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n def forward(self, 
x, emb):\n def _forward(self, x, emb):\n def __init__(\n self,\n channels,\n dropout,\n out_channels=None,\n use_conv=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n def forward(self, x):\n def _forward(self, x):\n def __init__(\n self,\n channels,\n num_heads=1,\n num_head_channels=-1,\n use_checkpoint=False,\n use_new_attention_order=False,\n ):\n def forward(self, x):\n def _forward(self, x):\ndef count_flops_attn(model, _x, y):\n def __init__(self, n_heads):\n def forward(self, qkv):\n def count_flops(model, _x, y):\n def __init__(self, n_heads):\n def forward(self, qkv):\n def count_flops(model, _x, y):\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None,\n disable_middle_self_attn=False,\n use_linear_in_transformer=False,\n ):\n def convert_to_fp16(self):\n def convert_to_fp32(self):\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\nclass AttentionPool2d(nn.Module):\nclass TimestepBlock(nn.Module):\nclass TimestepEmbedSequential(nn.Sequential, TimestepBlock):\nclass Upsample(nn.Module):\nclass TransposedUpsample(nn.Module):\nclass Downsample(nn.Module):\nclass ResBlock(TimestepBlock):\nclass ResBlockNoTime(TimestepBlock):\nclass AttentionBlock(nn.Module):\nclass QKVAttentionLegacy(nn.Module):\nclass QKVAttention(nn.Module):\nclass UNetModel(nn.Module):" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n force_null_conditioning=False,\n *args, **kwargs):\n self.force_null_conditioning = force_null_conditioning\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs['timesteps']\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = 'concat' if concat_mode else 'crossattn'\n if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n reset_ema = kwargs.pop(\"reset_ema\", False)\n reset_num_ema_updates = kwargs.pop(\"reset_num_ema_updates\", False)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = 
scale_factor\n else:\n self.register_buffer('scale_factor', torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n if reset_ema:\n assert self.use_ema\n print(\n f\"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.\")\n self.model_ema = LitEma(self.model)\n if reset_num_ema_updates:\n print(\" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ \")\n assert self.use_ema\n self.model_ema.reset_num_updates()\n\n def make_cond_schedule(self, ):\n self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)\n ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()\n self.cond_ids[:self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:\n assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer('scale_factor', 1. 
/ z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(self,\n given_betas=None, beta_schedule=\"linear\", timesteps=1000,\n linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != '__is_first_stage__'\n assert config != '__is_unconditional__'\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(self.decode_first_stage(zd.to(self.device),\n force_not_quantize=force_no_decoder_quantization))\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')\n denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\")\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n 
dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(weighting, self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"], )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"])\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1, padding=0,\n stride=(stride[0] * uf, stride[1] * uf))\n fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))\n\n elif df > 1 and uf == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1, padding=0,\n stride=(stride[0] // df, stride[1] // df))\n fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,\n cond_key=None, return_original_cond=False, bs=None, return_x=False):\n x = super().get_input(batch, k)\n if bs is not None:\n x = x[:bs]\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n\n if self.model.conditioning_key is not None and 
not self.force_null_conditioning:\n if cond_key is None:\n cond_key = self.cond_stage_key\n if cond_key != self.first_stage_key:\n if cond_key in ['caption', 'coordinates_bbox', 'txt', 'vision']:\n xc = batch[cond_key]\n xc = rearrange(xc, 'b h w c -> b c h w')\n elif cond_key in ['class_label', 'cls']:\n xc = batch\n else:\n xc = super().get_input(batch, cond_key).to(self.device)\n else:\n xc = x\n if not self.cond_stage_trainable or force_c_encode:\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n c = self.get_learned_conditioning(xc.to(self.device))\n else:\n c = xc\n if bs is not None:\n c = c[:bs]\n\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n ckey = __conditioning_keys__[self.model.conditioning_key]\n c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}\n\n else:\n c = None\n xc = None\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n c = {'pos_x': pos_x, 'pos_y': pos_y}\n out = [z, c]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_x:\n out.extend([x])\n if return_original_cond:\n out.append(xc)\n return out\n\n @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, 'b h w c -> b c h w').contiguous()\n\n z = 1. / self.scale_factor * z\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c)\n return loss\n\n def forward(self, x, c, *args, **kwargs):\n t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n if self.cond_stage_trainable:\n c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is expected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'\n cond = {key: cond}\n\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \\\n extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] 
tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n \n loss_dict = {}\n prefix = 'train' if self.training else 'val'\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n elif self.parameterization == \"v\":\n target = self.get_v(x_start, noise, t)\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})\n loss_dict.update({'logvar': self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})\n loss += (self.original_elbo_weight * loss_vlb)\n loss_dict.update({f'{prefix}/loss': loss})\n return loss, loss_dict\n\n def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,\n return_x0=False, score_corrector=None, corrector_kwargs=None):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1., 1.)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,\n return_codebook_ids=False, quantize_denoised=False, return_x0=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if return_codebook_ids:\n raise DeprecationWarning(\"Support 
dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,\n img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,\n score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,\n log_every_t=None):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',\n total=timesteps) if verbose else reversed(\n range(0, timesteps))\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised, return_x0=True,\n temperature=temperature[i], noise_dropout=noise_dropout,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. 
- mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(self, cond, shape, return_intermediates=False,\n x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, start_T=None,\n log_every_t=None):\n\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(\n range(0, timesteps))\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised)\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,\n verbose=True, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, shape=None, **kwargs):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n return self.p_sample_loop(cond,\n shape,\n return_intermediates=return_intermediates, x_T=x_T,\n verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,\n mask=mask, x0=x0)\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,\n shape, cond, verbose=False, **kwargs)\n\n else:\n samples, intermediates = self.sample(cond=cond, batch_size=batch_size,\n return_intermediates=True, **kwargs)\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(self, batch_size, null_label=None):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n if self.cond_stage_key in [\"class_label\", \"cls\"]:\n xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)\n return 
self.get_learned_conditioning(xc)\n else:\n raise NotImplementedError(\"todo\")\n if isinstance(c, list): # in case the encoder gives us a list\n for i in range(len(c)):\n c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)\n else:\n c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)\n return c\n\n @torch.no_grad()\n def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,\n quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,\n plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs):\n ema_scope = self.ema_scope if use_ema_scope else nullcontext\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N)\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in ['class_label', \"cls\"]:\n try:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[\"human_label\"], size=x.shape[2] // 25)\n log['conditioning'] = xc\n except KeyError:\n # probably no \"human_label\" in batch\n pass\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), '1 -> b', b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')\n diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(\n self.first_stage_model, IdentityFirstStage):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n quantize_denoised=True)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = 
self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)\n if self.model.conditioning_key == \"crossattn-adm\":\n uc = {\"c_crossattn\": [uc], \"c_adm\": c[\"c_adm\"]}\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1. - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N)\n prog_row = self._get_denoise_row_from_list(progressives, desc=\"Progressive Generation\")\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = list(self.model.parameters())\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print('Diffusion model optimizing logvar')\n params.append(self.logvar)\n opt = torch.optim.AdamW(params, lr=lr)\n if self.use_scheduler:\n assert 'target' in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),\n 'interval': 'step',\n 'frequency': 1\n }]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2. 
* (x - x.min()) / (x.max() - x.min()) - 1.\n return x" }, { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index 
= total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / 
a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} 
timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
import einops import torch import torch as th import torch.nn as nn from ldm.modules.diffusionmodules.util import ( conv_nd, linear, zero_module, timestep_embedding, ) from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.attention import SpatialTransformer, SpatialTransformerPlus from ldm.modules.diffusionmodules.openaimodel import ResBlock, TimestepEmbedSequential, Downsample, AttentionBlock, Upsample, normalization, checkpoint, convert_module_to_f16, convert_module_to_f32 from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, exists, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from omegaconf.listconfig import ListConfig
19213
use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformerPlus( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, use_temporal_attention=use_temporal_attention ), ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformerPlus( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, use_temporal_attention=use_temporal_attention ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch self.out = nn.Sequential(
class ReferenceNet(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. """ def __init__( self, image_size, in_channels, model_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != 
len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ), ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. 
""" self.input_blocks.apply(convert_module_to_f32) self.middle_block.apply(convert_module_to_f32) self.output_blocks.apply(convert_module_to_f32) def forward(self, x, timesteps=None, context=None, y=None,**kwargs): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param context: conditioning plugged in via crossattn :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. """ assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" refs = [] hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) h = x.type(self.dtype) # --------- input_block --------- for module in self.input_blocks: for sub_m in module: if isinstance(sub_m, ResBlock): h = sub_m(h, emb) elif isinstance(sub_m, SpatialTransformer): refs.append(h) # push features into refs before cross attention module h = sub_m(h, context) else: h = sub_m(h) hs.append(h) # --------- middle_block --------- for sub_m in self.middle_block: if isinstance(sub_m, ResBlock): h = sub_m(h, emb) elif isinstance(sub_m, SpatialTransformer): refs.append(h) # push features into refs before cross attention module h = sub_m(h, context) else: h = sub_m(h) # --------- output_block --------- for module in self.output_blocks: h = th.cat([h, hs.pop()], dim=1) for sub_m in module: if isinstance(sub_m, ResBlock): h = sub_m(h, emb) elif isinstance(sub_m, SpatialTransformer): refs.append(h) # push features into refs before cross attention module h = sub_m(h, context) else: h = sub_m(h) return refs class ReferenceUNetModel(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. 
""" def __init__( self, image_size, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_temporal_attention=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, frames=24, # temporal length ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) if self.num_classes is not None: if isinstance(self.num_classes, int): self.label_emb = nn.Embedding(num_classes, time_embed_dim) elif self.num_classes == "continuous": print("setting up linear c_adm embedding layer") self.label_emb = nn.Linear(1, time_embed_dim) else: raise ValueError() self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformerPlus( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, use_temporal_attention=use_temporal_attention ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, 
use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformerPlus( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, use_temporal_attention=use_temporal_attention ), ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformerPlus( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, use_temporal_attention=use_temporal_attention ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch self.out = nn.Sequential(
normalization(ch),
6
2023-12-16 03:31:33+00:00
24k
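Taken together, the fields above form one cross-file next-line completion record: `cropped_code` stops at `self.out = nn.Sequential(`, `next_line` holds the expected continuation (`normalization(ch),`), and `gold_snippet_index` (here 6) points into the `context` list of retrieved snippets, each carrying an `identifier`, `path`, and `snippet`. The sketch below shows one plausible way to consume such a record; the prompt layout and the exact-match check are assumptions made for illustration, not something defined by the dataset itself.

# Illustrative consumer for one record of this dataset (not part of the dump).
# The key names follow the record layout shown above; the prompt format and
# the exact-match metric are assumptions for the sake of the example.
from typing import Any, Dict


def build_prompt(record: Dict[str, Any]) -> str:
    """Prepend the gold retrieved snippet to the truncated in-file code."""
    gold = record["context"][record["gold_snippet_index"]]
    header = (
        f"# Repo: {record['repo_name']}  File: {record['file_path']}\n"
        f"# Retrieved `{gold['identifier']}` from {gold['path']}:\n"
    )
    return header + gold["snippet"] + "\n\n" + record["cropped_code"]


def is_exact_match(record: Dict[str, Any], completion: str) -> bool:
    """Check whether the first generated line equals `next_line`, ignoring surrounding whitespace."""
    lines = completion.splitlines()
    predicted = lines[0].strip() if lines else ""
    return predicted == record["next_line"].strip()

Under this reading, a prediction for this record counts as correct only if its first generated line, stripped of surrounding whitespace, equals `normalization(ch),`.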
yasserben/CLOUDS
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "clouds/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = (\n \"MultiScaleMaskedTransformerDecoder\"\n )\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\n \"res3\",\n \"res4\",\n \"res5\",\n ]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. 
Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75\n\n # Resizing disabled for Synthia\n cfg.INPUT.RESIZE = CN()\n cfg.INPUT.RESIZE.ENABLED = True\n cfg.INPUT.RESIZE.SIZE_TRAIN = (1280, 720)\n\n # Saving Pseudo Labels during test time\n cfg.MODEL.SAVE_PSEUDO_LABELS = False\n\n # for the Dataset repeat factor\n # cfg.DATASETS.TRAIN_REPEAT_FACTOR = [(\"sd_v99\",5.0), (\"cityscapes_train\",1.0)]" }, { "identifier": "add_clouds_config", "path": "clouds/config.py", "snippet": "def add_clouds_config(cfg):\n # CLOUDS model config\n cfg.MODEL.CLOUDS = CN()\n cfg.MODEL.CLOUDS.CLIP_MODEL_NAME = \"convnext_large_d_320\"\n cfg.MODEL.CLOUDS.CLIP_PRETRAINED_WEIGHTS = \"laion2b_s29b_b131k_ft_soup\"\n cfg.MODEL.CLOUDS.EMBED_DIM = 768\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_ALPHA = 0.4\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_BETA = 0.8\n cfg.MODEL.CLOUDS.ENSEMBLE_ON_VALID_MASK = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_EMA = False\n cfg.MODEL.CLOUDS.SAM = CN()\n cfg.MODEL.CLOUDS.SAM.ENABLED = False\n cfg.MODEL.CLOUDS.SAM.MOBILE = True\n cfg.MODEL.CLOUDS.SAM.MINIBATCH = False\n cfg.MODEL.CLOUDS.SAM.SIZE_THRESHOLD = 5000\n cfg.MODEL.CLOUDS.SAM.EROSION = False\n cfg.MODEL.CLOUDS.SAM.EROSION_SIZE = 3\n cfg.MODEL.CLOUDS.SAM.NUM_POINTS = 5\n cfg.MODEL.CLOUDS.SAM.SELECTION_MODE = \"random\"\n cfg.MODEL.CLOUDS.SAM.RM_INTERSECTION = True\n cfg.MODEL.CLOUDS.SAM.REFINEMENT = False\n cfg.MODEL.CLOUDS.SAM.ALPHA_EMA = 0.999\n cfg.MODEL.CLOUDS.OVERWRITING = True\n cfg.MODEL.CLOUDS.ITERATION_UPDATE = 100" }, { "identifier": "add_wandb_config", "path": "clouds/config.py", "snippet": "def add_wandb_config(cfg):\n # Wandb\n cfg.WANDB = CN()\n cfg.WANDB.PROJECT = \"clouds\"\n cfg.WANDB.NAME = None\n # use flash attention\n cfg.MODEL.FLASH = False" }, { "identifier": "add_prerocessing_training_set_config", "path": "clouds/config.py", "snippet": "def add_prerocessing_training_set_config(cfg):\n cfg.INPUT.FLIP = True\n cfg.INPUT.INITIAL_HEIGHT = 1052\n cfg.INPUT.INITIAL_WIDTH = 1914\n cfg.INPUT.RESIZE_HEIGHT = 720\n cfg.INPUT.RESIZE_WIDTH = 1280\n cfg.INPUT.PL_THRESHOLD = 0.0\n\n cfg.DATASETS.SOURCE_FACTOR = 1.0\n cfg.DATASETS.TARGET_FACTOR = 1.0" }, { "identifier": "add_repeat_factors", "path": "clouds/config.py", "snippet": "def add_repeat_factors(cfg):\n # for the Dataset repeat factor\n if (\n len(cfg.DATASETS.TRAIN) == 2\n and cfg.DATALOADER.SAMPLER_TRAIN == \"WeightedTrainingSampler\"\n ):\n if \"sd\" in cfg.DATASETS.TRAIN[0]:\n target_dataset = cfg.DATASETS.TRAIN[0]\n source_dataset = cfg.DATASETS.TRAIN[1]\n else:\n target_dataset = cfg.DATASETS.TRAIN[1]\n source_dataset = cfg.DATASETS.TRAIN[0]\n\n TRAIN_REPEAT_FACTOR = [\n (target_dataset, cfg.DATASETS.TARGET_FACTOR),\n (source_dataset, cfg.DATASETS.SOURCE_FACTOR),\n ]\n cfg.DATASETS.TRAIN_REPEAT_FACTOR = TRAIN_REPEAT_FACTOR\n return cfg\n else:\n return cfg" }, { "identifier": "MapperTrain", "path": "clouds/data/dataset_mappers/mapper_train.py", "snippet": "class MapperTrain:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations_src,\n augmentations_sd,\n augmentations_photo,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens_src = augmentations_src\n self.tfm_gens_sd = augmentations_sd\n self.tfm_gens_photometric = augmentations_photo\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(\n f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations_src}\"\n )\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n augs_src = []\n augs_sd = []\n augs_photometric = []\n # Build augmentation\n if cfg.INPUT.RESIZE.ENABLED:\n augs_src.append(\n T.ResizeScale(\n min_scale=0.5,\n max_scale=2.0,\n target_height=cfg.INPUT.INITIAL_HEIGHT,\n target_width=cfg.INPUT.INITIAL_WIDTH,\n interp=Image.BILINEAR,\n )\n )\n if cfg.INPUT.CROP.ENABLED:\n augs_src.append(\n T.FixedSizeCrop(\n (768, 768),\n pad=True,\n seg_pad_value=255,\n pad_value=0,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs_src.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs_photometric.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n if cfg.INPUT.FLIP:\n augs_src.append(T.RandomFlip())\n augs_sd.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations_src\": augs_src,\n \"augmentations_sd\": augs_sd,\n \"augmentations_photo\": augs_photometric,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert (\n self.is_train\n ), \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\n \"double\"\n )\n else:\n sem_seg_gt = np.full(\n (dataset_dict[\"height\"], dataset_dict[\"width\"]), self.ignore_label\n ).astype(\"double\")\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n if not (\"generated\" in str(dataset_dict[\"image_id\"])):\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_src, aug_input)\n 
image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n else:\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_sd, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n aug_input_photo, transforms = T.apply_transform_gens(\n self.tfm_gens_photometric, aug_input\n )\n image_aug = aug_input_photo.image\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = torch.as_tensor(\n np.ascontiguousarray(image_aug.transpose(2, 0, 1))\n )\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = F.pad(image_aug, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(\n sem_seg_gt, padding_size, value=self.ignore_label\n ).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n dataset_dict[\"image_aug\"] = image_aug\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\n \"Semantic segmentation dataset should not have 'annotations'.\"\n )\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros(\n (0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1])\n )\n else:\n masks = BitMasks(\n torch.stack(\n [\n torch.from_numpy(np.ascontiguousarray(x.copy()))\n for x in masks\n ]\n )\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MapperTest", "path": "clouds/data/dataset_mappers/mapper_test.py", "snippet": "class MapperTest:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by the model.\n\n This is the default callable to be used to map your dataset dict into training data.\n You may need to follow it to implement your own one for customized logic,\n such as a different way to read or transform images.\n See :doc:`/tutorials/data_loading` for details.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies cropping/geometric transforms to the image and annotations\n 3. 
Prepare data and annotations to Tensor and :class:`Instances`\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n # if recompute_boxes:\n # assert use_instance_mask, \"recompute_boxes requires instance masks\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = augmentations\n self.image_format = image_format\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True):\n augs = [T.ResizeShortestEdge(short_edge_length=[1024], sample_style=\"choice\")]\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n\n\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n # USER: Write your own image loading if it's not from a file\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.image_format)\n utils.check_image_size(dataset_dict, image)\n\n # USER: Remove if you don't do semantic/panoptic segmentation.\n if \"sem_seg_file_name\" in dataset_dict:\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\"), \"L\").squeeze(2)\n else:\n sem_seg_gt = None\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transformation = T.apply_transform_gens(self.augmentations, aug_input)\n image, sem_seg_gt = aug_input.image, aug_input.sem_seg\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n dataset_dict['height'] = dataset_dict[\"image\"].shape[1]\n dataset_dict['width'] = dataset_dict[\"image\"].shape[2]\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"sem_seg_file_name\", None)\n return dataset_dict\n\n return dataset_dict" }, { "identifier": "CityscapesSemSegEvaluator", "path": "clouds/evaluation/cityscapes_evaluation.py", "snippet": "class CityscapesSemSegEvaluator(CityscapesEvaluator):\n \"\"\"\n Evaluate semantic segmentation results on cityscapes dataset using cityscapes API.\n\n Note:\n * It does not work in multi-machine distributed training.\n * It contains a synchronization, therefore has to be used on all ranks.\n * Only the main process runs evaluation.\n \"\"\"\n\n def process(self, inputs, outputs):\n from cityscapesscripts.helpers.labels import trainId2label\n for input, output in zip(inputs, outputs):\n file_name = input[\"file_name\"]\n basename = os.path.splitext(os.path.basename(file_name))[0]\n pred_filename = os.path.join(self._temp_dir, basename + 
\"_pred.png\")\n\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device).numpy()\n pred = 255 * np.ones(output.shape, dtype=np.uint8)\n for train_id, label in trainId2label.items():\n if label.ignoreInEval:\n continue\n pred[output == train_id] = label.id\n Image.fromarray(pred).save(pred_filename)\n\n\n def evaluate(self):\n comm.synchronize()\n if comm.get_rank() > 0:\n return\n # Load the Cityscapes eval script *after* setting the required env var,\n # since the script reads CITYSCAPES_DATASET into global variables at load time.\n import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval\n\n self._logger.info(\"Evaluating results under {} ...\".format(self._temp_dir))\n\n # set some global states in cityscapes evaluation API, before evaluating\n cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)\n cityscapes_eval.args.predictionWalk = None\n cityscapes_eval.args.JSONOutput = False\n cityscapes_eval.args.colorized = False\n\n # These lines are adopted from\n # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa\n gt_dir = PathManager.get_local_path(self._metadata.gt_dir)\n groundTruthImgList = glob.glob(\n os.path.join(gt_dir, \"*\", \"*_gtFine_labelIds.png\")\n )\n assert len(\n groundTruthImgList\n ), \"Cannot find any ground truth images to use for evaluation. Searched for: {}\".format(\n cityscapes_eval.args.groundTruthSearch\n )\n predictionImgList = []\n for gt in groundTruthImgList:\n predictionImgList.append(\n cityscapes_eval.getPrediction(cityscapes_eval.args, gt)\n )\n results = cityscapes_eval.evaluateImgLists(\n predictionImgList, groundTruthImgList, cityscapes_eval.args\n )\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": 100.0 * results[\"averageScoreClasses\"],\n \"IoU.road\": 100.0 * results[\"classScores\"][\"road\"],\n \"IoU.sidewalk\": 100.0 * results[\"classScores\"][\"sidewalk\"],\n \"IoU.building\": 100.0 * results[\"classScores\"][\"building\"],\n \"IoU.wall\": 100.0 * results[\"classScores\"][\"wall\"],\n \"IoU.fence\": 100.0 * results[\"classScores\"][\"fence\"],\n \"IoU.pole\": 100.0 * results[\"classScores\"][\"pole\"],\n \"IoU.traffic light\": 100.0 * results[\"classScores\"][\"traffic light\"],\n \"IoU.traffic sign\": 100.0 * results[\"classScores\"][\"traffic sign\"],\n \"IoU.vegetation\": 100.0 * results[\"classScores\"][\"vegetation\"],\n \"IoU.terrain\": 100.0 * results[\"classScores\"][\"terrain\"],\n \"IoU.sky\": 100.0 * results[\"classScores\"][\"sky\"],\n \"IoU.person\": 100.0 * results[\"classScores\"][\"person\"],\n \"IoU.rider\": 100.0 * results[\"classScores\"][\"rider\"],\n \"IoU.car\": 100.0 * results[\"classScores\"][\"car\"],\n \"IoU.truck\": 100.0 * results[\"classScores\"][\"truck\"],\n \"IoU.bus\": 100.0 * results[\"classScores\"][\"bus\"],\n \"IoU.train\": 100.0 * results[\"classScores\"][\"train\"],\n \"IoU.motorcycle\": 100.0 * results[\"classScores\"][\"motorcycle\"],\n \"IoU.bicycle\": 100.0 * results[\"classScores\"][\"bicycle\"],\n }\n if not self._save_pl:\n self._working_dir.cleanup()\n return ret" }, { "identifier": "ClassicalSemSegEvaluator", "path": "clouds/evaluation/semantic_evaluation.py", "snippet": "class ClassicalSemSegEvaluator(DatasetEvaluator):\n \"\"\"\n Evaluate semantic segmentation metrics.\n \"\"\"\n\n def __init__(\n self,\n dataset_name,\n distributed=True,\n output_dir=None,\n *,\n sem_seg_loading_fn=load_image_into_numpy_array,\n num_classes=None,\n ignore_label=None,\n 
save_pl=False,\n ):\n \"\"\"\n Args:\n dataset_name (str): name of the dataset to be evaluated.\n distributed (bool): if True, will collect results from all ranks for evaluation.\n Otherwise, will evaluate the results in the current process.\n output_dir (str): an output directory to dump results.\n sem_seg_loading_fn: function to read sem seg file and load into numpy array.\n Default provided, but projects can customize.\n num_classes, ignore_label: deprecated argument\n \"\"\"\n self._logger = logging.getLogger(__name__)\n if num_classes is not None:\n self._logger.warn(\n \"SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata.\"\n )\n if ignore_label is not None:\n self._logger.warn(\n \"SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata.\"\n )\n self._dataset_name = dataset_name\n self._distributed = distributed\n self._output_dir = output_dir\n\n self._cpu_device = torch.device(\"cpu\")\n\n self.input_file_to_gt_file = {\n dataset_record[\"file_name\"]: dataset_record[\"sem_seg_file_name\"]\n for dataset_record in DatasetCatalog.get(dataset_name)\n }\n\n meta = MetadataCatalog.get(dataset_name)\n # Dict that maps contiguous training ids to COCO category ids\n try:\n c2d = meta.stuff_dataset_id_to_contiguous_id\n self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}\n except AttributeError:\n self._contiguous_id_to_dataset_id = None\n self._class_names = meta.stuff_classes\n self.sem_seg_loading_fn = sem_seg_loading_fn\n self._num_classes = len(meta.stuff_classes)\n if num_classes is not None:\n assert (\n self._num_classes == num_classes\n ), f\"{self._num_classes} != {num_classes}\"\n self._ignore_label = (\n ignore_label if ignore_label is not None else meta.ignore_label\n )\n\n # This is because cv2.erode did not work for int datatype. Only works for uint8.\n self._compute_boundary_iou = True\n if not _CV2_IMPORTED:\n self._compute_boundary_iou = False\n self._logger.warn(\n \"\"\"Boundary IoU calculation requires OpenCV. B-IoU metrics are\n not going to be computed because OpenCV is not available to import.\"\"\"\n )\n if self._num_classes >= np.iinfo(np.uint8).max:\n self._compute_boundary_iou = False\n self._logger.warn(\n f\"\"\"SemSegEvaluator(num_classes) is more than supported value for Boundary IoU calculation!\n B-IoU metrics are not going to be computed. Max allowed value (exclusive)\n for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}.\n The number of classes of dataset {self._dataset_name} is {self._num_classes}\"\"\"\n )\n self._save_pl = save_pl\n\n def reset(self):\n self._conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._b_conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._predictions = []\n\n def process(self, inputs, outputs):\n \"\"\"\n Args:\n inputs: the inputs to a model.\n It is a list of dicts. Each dict corresponds to an image and\n contains keys like \"height\", \"width\", \"file_name\".\n outputs: the outputs of a model. 
It is either list of semantic segmentation predictions\n (Tensor [H, W]) or list of dicts with key \"sem_seg\" that contains semantic\n segmentation prediction in the same format.\n \"\"\"\n for input, output in zip(inputs, outputs):\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device)\n pred = np.array(output, dtype=int)\n gt = input[\"sem_seg\"].numpy()\n\n gt[gt == self._ignore_label] = self._num_classes\n\n self._conf_matrix += np.bincount(\n (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._compute_boundary_iou:\n b_gt = self._mask_to_boundary(gt.astype(np.uint8))\n b_pred = self._mask_to_boundary(pred.astype(np.uint8))\n\n self._b_conf_matrix += np.bincount(\n (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._save_pl:\n self._predictions.extend(\n [dict(file_name=input[\"file_name\"], pred=pred)]\n )\n else:\n self._predictions.extend(\n self.encode_json_sem_seg(pred, input[\"file_name\"])\n )\n\n def evaluate(self):\n \"\"\"\n Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):\n\n * Mean intersection-over-union averaged across classes (mIoU)\n * Frequency Weighted IoU (fwIoU)\n * Mean pixel accuracy averaged across classes (mACC)\n * Pixel Accuracy (pACC)\n \"\"\"\n if self._distributed:\n synchronize()\n conf_matrix_list = all_gather(self._conf_matrix)\n b_conf_matrix_list = all_gather(self._b_conf_matrix)\n self._predictions = all_gather(self._predictions)\n self._predictions = list(itertools.chain(*self._predictions))\n if not is_main_process():\n return\n\n self._conf_matrix = np.zeros_like(self._conf_matrix)\n for conf_matrix in conf_matrix_list:\n self._conf_matrix += conf_matrix\n\n self._b_conf_matrix = np.zeros_like(self._b_conf_matrix)\n for b_conf_matrix in b_conf_matrix_list:\n self._b_conf_matrix += b_conf_matrix\n\n if self._output_dir:\n first_elem = self._predictions[0]\n if \"bdd\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"bdd_eval_pl\")\n elif \"mapillary\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"mapillary_eval_pl\")\n PathManager.mkdirs(self._output_dir)\n if self._save_pl:\n # A function that will iterate over the list of dictionnaries and write the corresponding image\n # in the output directory\n def write_image_from_dict(dict):\n filename = os.path.join(\n self._output_dir,\n dict[\"file_name\"].split(\"/\")[-1].split(\".\")[0] + \"_pred.png\",\n )\n pred = dict[\"pred\"]\n pred = get_rgb_from_semantic_map_maxed(pred)\n # pred = Image.fromarray(pred)\n pred.save(filename)\n\n # We apply the function to the list of dictionnaries\n list(map(write_image_from_dict, self._predictions))\n\n else:\n file_path = os.path.join(self._output_dir, \"sem_seg_predictions.json\")\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(self._predictions))\n\n acc = np.full(self._num_classes, np.nan, dtype=float)\n iou = np.full(self._num_classes, np.nan, dtype=float)\n tp = self._conf_matrix.diagonal()[:-1].astype(float)\n pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)\n class_weights = pos_gt / np.sum(pos_gt)\n pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)\n acc_valid = pos_gt > 0\n acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]\n union = pos_gt + pos_pred - tp\n iou_valid = 
np.logical_and(acc_valid, union > 0)\n iou[iou_valid] = tp[iou_valid] / union[iou_valid]\n macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)\n miou = np.sum(iou[iou_valid]) / np.sum(iou_valid)\n fiou = np.sum(iou[iou_valid] * class_weights[iou_valid])\n pacc = np.sum(tp) / np.sum(pos_gt)\n\n if self._compute_boundary_iou:\n b_iou = np.full(self._num_classes, np.nan, dtype=float)\n b_tp = self._b_conf_matrix.diagonal()[:-1].astype(float)\n b_pos_gt = np.sum(self._b_conf_matrix[:-1, :-1], axis=0).astype(float)\n b_pos_pred = np.sum(self._b_conf_matrix[:-1, :-1], axis=1).astype(float)\n b_union = b_pos_gt + b_pos_pred - b_tp\n b_iou_valid = b_union > 0\n b_iou[b_iou_valid] = b_tp[b_iou_valid] / b_union[b_iou_valid]\n\n res = {}\n res[\"mIoU\"] = 100 * miou\n res[\"fwIoU\"] = 100 * fiou\n for i, name in enumerate(self._class_names):\n res[f\"IoU-{name}\"] = 100 * iou[i]\n if self._compute_boundary_iou:\n res[f\"BoundaryIoU-{name}\"] = 100 * b_iou[i]\n res[f\"min(IoU, B-Iou)-{name}\"] = 100 * min(iou[i], b_iou[i])\n res[\"mACC\"] = 100 * macc\n res[\"pACC\"] = 100 * pacc\n for i, name in enumerate(self._class_names):\n res[f\"ACC-{name}\"] = 100 * acc[i]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"sem_seg_evaluation.pth\")\n with PathManager.open(file_path, \"wb\") as f:\n torch.save(res, f)\n results = OrderedDict({\"sem_seg\": res})\n self._logger.info(results)\n\n def get_miou_value_from_dict(dict, subkey):\n for key, value in dict.items():\n if subkey in key and \"IoU\" in key:\n if np.isnan(value):\n return 0\n else:\n return value\n\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": results[\"sem_seg\"][\"mIoU\"],\n \"IoU.road\": get_miou_value_from_dict(results[\"sem_seg\"], \"road\"),\n \"IoU.sidewalk\": get_miou_value_from_dict(results[\"sem_seg\"], \"sidewalk\"),\n \"IoU.building\": get_miou_value_from_dict(results[\"sem_seg\"], \"building\"),\n \"IoU.wall\": get_miou_value_from_dict(results[\"sem_seg\"], \"wall\"),\n \"IoU.fence\": get_miou_value_from_dict(results[\"sem_seg\"], \"fence\"),\n \"IoU.pole\": get_miou_value_from_dict(results[\"sem_seg\"], \"pole\"),\n \"IoU.traffic light\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic light\"\n ),\n \"IoU.traffic sign\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic sign\"\n ),\n \"IoU.vegetation\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"vegetation\"\n ),\n \"IoU.terrain\": get_miou_value_from_dict(results[\"sem_seg\"], \"terrain\"),\n \"IoU.sky\": get_miou_value_from_dict(results[\"sem_seg\"], \"sky\"),\n \"IoU.person\": get_miou_value_from_dict(results[\"sem_seg\"], \"person\"),\n \"IoU.rider\": get_miou_value_from_dict(results[\"sem_seg\"], \"rider\"),\n \"IoU.car\": get_miou_value_from_dict(results[\"sem_seg\"], \"car\"),\n \"IoU.truck\": get_miou_value_from_dict(results[\"sem_seg\"], \"truck\"),\n \"IoU.bus\": get_miou_value_from_dict(results[\"sem_seg\"], \"bus\"),\n \"IoU.train\": get_miou_value_from_dict(results[\"sem_seg\"], \"train\"),\n \"IoU.motorcycle\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"motorcycle\"\n ),\n \"IoU.bicycle\": get_miou_value_from_dict(results[\"sem_seg\"], \"bicycle\"),\n }\n return ret\n\n def encode_json_sem_seg(self, sem_seg, input_file_name):\n \"\"\"\n Convert semantic segmentation to COCO stuff format with segments encoded as RLEs.\n See http://cocodataset.org/#format-results\n \"\"\"\n json_list = []\n for label in np.unique(sem_seg):\n if self._contiguous_id_to_dataset_id is not None:\n assert (\n label in 
self._contiguous_id_to_dataset_id\n ), \"Label {} is not in the metadata info for {}\".format(\n label, self._dataset_name\n )\n dataset_id = self._contiguous_id_to_dataset_id[label]\n else:\n dataset_id = int(label)\n mask = (sem_seg == label).astype(np.uint8)\n mask_rle = mask_util.encode(np.array(mask[:, :, None], order=\"F\"))[0]\n mask_rle[\"counts\"] = mask_rle[\"counts\"].decode(\"utf-8\")\n json_list.append(\n {\n \"file_name\": input_file_name,\n \"category_id\": dataset_id,\n \"segmentation\": mask_rle,\n }\n )\n return json_list\n\n def _mask_to_boundary(self, mask: np.ndarray, dilation_ratio=0.02):\n assert mask.ndim == 2, \"mask_to_boundary expects a 2-dimensional image\"\n h, w = mask.shape\n diag_len = np.sqrt(h ** 2 + w ** 2)\n dilation = max(1, int(round(dilation_ratio * diag_len)))\n kernel = np.ones((3, 3), dtype=np.uint8)\n\n padded_mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)\n eroded_mask_with_padding = cv2.erode(padded_mask, kernel, iterations=dilation)\n eroded_mask = eroded_mask_with_padding[1:-1, 1:-1]\n boundary = mask - eroded_mask\n return boundary" }, { "identifier": "PersoEvalHook", "path": "clouds/engine/hooks.py", "snippet": "class PersoEvalHook(HookBase):\n \"\"\"\n Run an evaluation function periodically, and at the end of training.\n\n It is executed every ``eval_period`` iterations and after the last iteration.\n \"\"\"\n\n def __init__(self, eval_period, eval_function, eval_after_train=True):\n \"\"\"\n Args:\n eval_period (int): the period to run `eval_function`. Set to 0 to\n not evaluate periodically (but still evaluate after the last iteration\n if `eval_after_train` is True).\n eval_function (callable): a function which takes no arguments, and\n returns a nested dict of evaluation metrics.\n eval_after_train (bool): whether to evaluate after the last iteration\n\n Note:\n This hook must be enabled in all or none workers.\n If you would like only certain workers to perform evaluation,\n give other workers a no-op function (`eval_function=lambda: None`).\n \"\"\"\n self._period = eval_period\n self._func = eval_function\n self._eval_after_train = eval_after_train\n\n def _do_eval(self):\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. \"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)\n\n # Evaluation may take different time among workers.\n # A barrier make them start the next iteration together.\n comm.synchronize()\n\n def before_train(self):\n \"\"\"\n Called before the first iteration.\n \"\"\"\n if \"debug\" in self.trainer.cfg.OUTPUT_DIR:\n pass\n else:\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. 
\"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(\n **flattened_results, smoothing_hint=False\n )\n\n def after_step(self):\n next_iter = self.trainer.iter + 1\n if self._period > 0 and next_iter % self._period == 0:\n # do the last eval in after_train\n if next_iter != self.trainer.max_iter:\n self._do_eval()\n\n def after_train(self):\n # This condition is to prevent the eval from running after a failed training\n if self._eval_after_train and self.trainer.iter + 1 >= self.trainer.max_iter:\n self._do_eval()\n # func is likely a closure that holds reference to the trainer\n # therefore we clean it to avoid circular reference in the end\n del self._func" }, { "identifier": "WandbWriter", "path": "clouds/utils/events.py", "snippet": "class WandbWriter(EventWriter):\n \"\"\"\n Write all scalars to a tensorboard file.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Args:\n log_dir (str): the directory to save the output events\n kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`\n \"\"\"\n self._last_write = -1\n self._group_rules = [\n (IsIn(\"/\"), BaseRule()),\n (IsIn(\"loss\"), Prefix(\"train\")),\n # (IsIn(\"sem_seg\"), Prefix(\"val\")),\n (\n IsInList([\"lr\", \"time\", \"eta_seconds\", \"rank_data_time\", \"data_time\"]),\n Prefix(\"stats\"),\n ),\n ]\n\n def write(self):\n storage = get_event_storage()\n\n def _group_name(scalar_name):\n for rule, op in self._group_rules:\n if rule(scalar_name):\n return op(scalar_name)\n return scalar_name\n\n stats = {\n _group_name(name): scalars[0]\n for name, scalars in storage.latest().items()\n if scalars[1] > self._last_write\n }\n if len(stats) > 0:\n self._last_write = max([v[1] for k, v in storage.latest().items()])\n\n # storage.put_{image,histogram} is only meant to be used by\n # tensorboard writer. So we access its internal fields directly from here.\n if len(storage._vis_data) >= 1:\n stats[\"image\"] = [\n wandb.Image(img, caption=img_name)\n for img_name, img, step_num in storage._vis_data\n ]\n # Storage stores all image data and rely on this writer to clear them.\n # As a result it assumes only one writer will use its image data.\n # An alternative design is to let storage store limited recent\n # data (e.g. 
only the most recent image) that all writers can access.\n # In that case a writer may not see all image data if its period is long.\n storage.clear_images()\n\n if len(storage._histograms) >= 1:\n\n def create_bar(tag, bucket_limits, bucket_counts, **kwargs):\n data = [\n [label, val] for (label, val) in zip(bucket_limits, bucket_counts)\n ]\n table = wandb.Table(data=data, columns=[\"label\", \"value\"])\n return wandb.plot.bar(table, \"label\", \"value\", title=tag)\n\n stats[\"hist\"] = [create_bar(**params) for params in storage._histograms]\n\n storage.clear_histograms()\n\n if len(stats) == 0:\n return\n wandb.log(stats, step=storage.iter)\n\n def close(self):\n wandb.finish()" }, { "identifier": "setup_wandb", "path": "clouds/utils/events.py", "snippet": "def setup_wandb(cfg, args):\n if comm.is_main_process():\n init_args = {\n k.lower(): v\n for k, v in cfg.WANDB.items()\n if isinstance(k, str) and k not in [\"config\", \"name\"]\n }\n if \"config_exclude_keys\" in init_args:\n init_args[\"config\"] = cfg\n init_args[\"config\"][\"cfg_file\"] = args.config_file\n else:\n init_args[\"config\"] = {\n \"output_dir\": cfg.OUTPUT_DIR,\n \"train\": extract_dataset_from_string(cfg.DATASETS.TRAIN),\n \"test\": extract_dataset_from_string(cfg.DATASETS.TEST),\n \"iter\": cfg.SOLVER.MAX_ITER,\n \"lr\": cfg.SOLVER.BASE_LR,\n \"batch_size\": cfg.SOLVER.IMS_PER_BATCH,\n \"cfg_file\": args.config_file,\n }\n\n init_args[\"group\"] = get_base_name(cfg)\n if cfg.WANDB.NAME is not None:\n init_args[\"name\"] = cfg.WANDB.NAME\n else:\n init_args[\"name\"] = get_full_name_xp(init_args[\"group\"], cfg)\n if \"debug\" in cfg.OUTPUT_DIR:\n init_args[\"project\"] = \"debug\"\n wandb.init(**init_args)" } ]
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import ( MetadataCatalog, build_detection_train_loader, build_detection_test_loader, ) from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.modeling import build_model from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, inference_on_dataset, print_csv_format, DatasetEvaluator, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from fvcore.nn.precise_bn import get_bn_modules from clouds import ( CityscapesSemSegEvaluator, ClassicalSemSegEvaluator, MapperTrain, MapperTest, add_maskformer2_config, add_clouds_config, add_wandb_config, add_prerocessing_training_set_config, PersoEvalHook, add_repeat_factors, ) from clouds.utils import setup_wandb, WandbWriter import warnings import copy import itertools import logging import os import ast import torch import detectron2.utils.comm as comm
14,541
@classmethod def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg)
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper mapper = MapperTrain(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_test_loader(cls, cfg, dataset_name): mapper = MapperTest(cfg, False) return build_detection_test_loader( cfg, dataset_name, batch_size=1, mapper=mapper ) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue if cfg.MODEL.CLOUDS.OVERWRITING: if any( ignored_module in module_name for ignored_module in ["sem_seg_head_ema.", "sam.sam."] ): continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = ( hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER ) if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain( *[x["params"] for x in self.param_groups] ) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. 
Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg)
add_maskformer2_config(cfg)
0
2023-12-15 15:40:58+00:00
24k
Ruiyuan-Zhang/CCS
multi_part_assembly/utils/wx_transformer_utilities/multihead_attention.py
[ { "identifier": "FairseqDropout", "path": "multi_part_assembly/utils/wx_transformer_utilities/fairseq_dropout.py", "snippet": "class FairseqDropout(nn.Module):\n\n def __init__(self, p, module_name=None):\n super().__init__()\n self.p = p\n self.module_name = module_name\n self.apply_during_inference = False\n\n def forward(self, x, inplace: bool = False):\n if self.training or self.apply_during_inference:\n return F.dropout(x, p=self.p, training=True, inplace=inplace)\n else:\n return x\n\n def make_generation_fast_(\n self,\n name: str,\n retain_dropout: bool = False,\n retain_dropout_modules: Optional[List[str]] = None,\n **kwargs\n ):\n if retain_dropout:\n if retain_dropout_modules is not None and self.module_name is None:\n logger.warning(\n 'Cannot enable dropout during inference for module {} '\n 'because module_name was not set'.format(name)\n )\n elif (\n retain_dropout_modules is None # if None, apply to all modules\n or self.module_name in retain_dropout_modules\n ):\n logger.info(\n 'Enabling dropout during inference for module: {}'.format(name)\n )\n self.apply_during_inference = True\n else:\n logger.info('Disabling dropout for module: {}'.format(name))" }, { "identifier": "MultiHeadAttention", "path": "multi_part_assembly/utils/wx_transformer_utilities/attention_rim.py", "snippet": "class MultiHeadAttention(nn.Module):\n ''' Multi-Head Attention module '''\n\n def __init__(self, n_head, d_model_read, d_model_write, d_model_out, d_k, d_v, grad_sparse, residual=True, dropout=0.1, skip_write=False, flag=False):\n super().__init__()\n\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Initialize Multi-Head Attention~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n # print('d model read: ', d_model_read)\n # print('d_model_write: ', d_model_write)\n # print('d_model_out: ', d_model_out)\n # print('n_head: ', n_head)\n # print('d_k: ', d_k)\n # print('d_v: ', d_v)\n # print('num_blocks_read: ', num_blocks_read)\n # print('num_blocks_write: ', num_blocks_write)\n # input()\n\n self.GLN_qs = nn.Linear(d_model_read, n_head * d_k)\n self.GLN_ks = nn.Linear(d_model_write, n_head * d_k)\n self.GLN_vs = nn.Linear(d_model_write, n_head * d_v)\n\n self.residual = residual\n\n #self.w_qs = nn.Linear(d_model_read, n_head * d_k)\n #self.w_ks = nn.Linear(d_model_write, n_head * d_k)\n #self.w_vs = nn.Linear(d_model_write, n_head * d_v)\n\n #nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n #nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n #nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))\n\n self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5), flag=flag)\n #self.layer_norm = nn.LayerNorm(d_model)\n\n self.gate_fc = nn.Linear(n_head * d_v, d_model_out)\n\n if not skip_write:\n self.fc = nn.Linear(n_head * d_v, d_model_out)\n else:\n self.fc = lambda a: a\n\n #nn.init.xavier_normal_(self.fc.weight)\n\n self.dropout = nn.Dropout(dropout)\n\n self.ln = nn.LayerNorm(d_model_out)\n\n def forward(self, q, k, v, mask=None):\n\n #print('attn input shape', q.shape)\n\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n\n sz_b, len_q, _ = q.size()\n sz_b, len_k, _ = k.size()\n sz_b, len_v, _ = v.size()\n\n residual = q\n\n #print('q shape', q.shape)\n\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Forward of Multi-Head Attention~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n # print(\"q: \", q.size())\n # print(\"k: \", k.size())\n # print(\"v: \", v.size())\n # 
input()\n\n q = self.GLN_qs(q).view(sz_b, len_q, n_head, d_k)\n #q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.GLN_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.GLN_vs(v).reshape(sz_b, len_v, n_head, d_v)\n #v = v.view(sz_b, len_v, n_head, d_v)\n\n # print(\"GLN q: \", q.size())\n # print(\"GLN k: \", k.size())\n # print(\"GLN v: \", v.size())\n\n q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk\n k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk\n v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv\n\n # print(\"Permute q: \", q.size())\n # print(\"Permute k: \", k.size())\n # print(\"Permute v: \", v.size())\n\n #mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..\n output, attn, extra_loss = self.attention(q, k, v, mask=None)\n\n # print(\"Output: \", output.size())\n # print(\"Attention: \", attn.size())\n\n output = output.view(n_head, sz_b, len_q, d_v)\n output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)\n\n # print(\"Here Output: \", output.size())\n\n #print('output shape before fc', output.shape)\n\n #TODO: probably shouldn't just apply residual layer in the forward pass.\n\n output_init = output*1.0\n\n output = self.dropout(self.fc(output_init))\n\n gate = torch.sigmoid(self.gate_fc(output_init))\n\n #output = self.layer_norm(gate * output + (1 - gate) * residual)\n #output = gate * output + (1 - gate) * residual\n\n if self.residual:\n output = gate * torch.tanh(output)\n else:\n #output = self.ln(output)\n pass\n\n # print(\"Final Output: \", output.size())\n\n #output\n\n #print('attn', attn[0])\n #print('output input diff', output - residual)\n\n return output, attn, extra_loss" }, { "identifier": "quant_noise", "path": "multi_part_assembly/utils/wx_transformer_utilities/quant_noise.py", "snippet": "def quant_noise(module, p, block_size):\n \"\"\"\n Wraps modules and applies quantization noise to the weights for\n subsequent quantization with Iterative Product Quantization as\n described in \"Training with Quantization Noise for Extreme Model Compression\"\n\n Args:\n - module: nn.Module\n - p: amount of Quantization Noise\n - block_size: size of the blocks for subsequent quantization with iPQ\n\n Remarks:\n - Module weights must have the right sizes wrt the block size\n - Only Linear, Embedding and Conv2d modules are supported for the moment\n - For more detail on how to quantize by blocks with convolutional weights,\n see \"And the Bit Goes Down: Revisiting the Quantization of Neural Networks\"\n - We implement the simplest form of noise here as stated in the paper\n which consists in randomly dropping blocks\n \"\"\"\n\n # if no quantization noise, don't register hook\n if p <= 0:\n return module\n\n # supported modules\n assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))\n\n # test whether module.weight has the right sizes wrt block_size\n is_conv = module.weight.ndim == 4\n\n # 2D matrix\n if not is_conv:\n assert module.weight.size(1) % block_size == 0, \"Input features must be a multiple of block sizes\"\n\n # 4D matrix\n else:\n # 1x1 convolutions\n if module.kernel_size == (1, 1):\n assert module.in_channels % block_size == 0, \"Input channels must be a multiple of block sizes\"\n # regular convolutions\n else:\n k = module.kernel_size[0] * module.kernel_size[1]\n assert k % block_size == 0, \"Kernel size must be a multiple of block size\"\n\n def _forward_pre_hook(mod, input):\n # no noise for evaluation\n if 
mod.training:\n if not is_conv:\n # gather weight and sizes\n weight = mod.weight\n in_features = weight.size(1)\n out_features = weight.size(0)\n\n # split weight matrix into blocks and randomly drop selected blocks\n mask = torch.zeros(in_features // block_size * out_features, device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)\n\n else:\n # gather weight and sizes\n weight = mod.weight\n in_channels = mod.in_channels\n out_channels = mod.out_channels\n\n # split weight matrix into blocks and randomly drop selected blocks\n if mod.kernel_size == (1, 1):\n mask = torch.zeros(int(in_channels // block_size * out_channels), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)\n else:\n mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])\n\n # scale weights and apply mask\n mask = mask.to(torch.bool) # x.bool() is not currently supported in TorchScript\n s = 1 / (1 - p)\n mod.weight.data = s * weight.masked_fill(mask, 0)\n\n module.register_forward_pre_hook(_forward_pre_hook)\n return module" }, { "identifier": "GroupLinearLayer", "path": "multi_part_assembly/utils/wx_transformer_utilities/group_linear_layer.py", "snippet": "class GroupLinearLayer(nn.Module):\n\n def __init__(self, din, dout, num_blocks, bias=True, a = None):\n super(GroupLinearLayer, self).__init__()\n self.nb = num_blocks\n self.dout = dout\n\n if a is None:\n a = 1. / math.sqrt(dout * num_blocks)\n\n #gain = 1.0 / math.sqrt(2)\n #a = gain * math.sqrt(6.0 / (din + dout))\n\n self.weight = nn.Parameter(torch.FloatTensor(num_blocks,din,dout).uniform_(-a,a))\n\n self.bias = bias\n\n if bias is True:\n self.bias = nn.Parameter(torch.FloatTensor(num_blocks,dout).uniform_(-a,a))\n #self.bias = nn.Parameter(torch.zeros(dout*num_blocks))\n else:\n self.bias = None\n\n def forward(self,x):\n\n\t#input: ts x bs x blocks*nhid\n\t#ts*bs , blocks, nhid\n\t#blocks, ts*bs, nhid\n ts,bs,m = x.shape\t\n\n x = x.reshape((ts*bs, self.nb, m//self.nb))\n x = x.permute(1,0,2)\n x = torch.bmm(x,self.weight)\n x = x.permute(1,0,2)\n \n if not self.bias is None:\n x = x + self.bias\n\n x = x.reshape((ts, bs, self.dout*self.nb))\n \n #if not self.bias is None:\n # x += self.bias\n\n return x" }, { "identifier": "RelationalMemory", "path": "multi_part_assembly/utils/wx_transformer_utilities/relational_memory_volatile.py", "snippet": "class RelationalMemory(nn.Module):\n \"\"\"\n Constructs a `RelationalMemory` object.\n This class is same as the RMC from relational_rnn_models.py, but without language modeling-specific variables.\n Args:\n mem_slots: The total number of memory slots to use.\n head_size: The size of an attention head.\n input_size: The size of input per step. i.e. the dimension of each input vector\n num_heads: The number of attention heads to use. Defaults to 1.\n num_blocks: Number of times to compute attention per time step. Defaults\n to 1.\n forget_bias: Bias to use for the forget gate, assuming we are using\n some form of gating. Defaults to 1.\n input_bias: Bias to use for the input gate, assuming we are using\n some form of gating. Defaults to 0.\n gate_style: Whether to use per-element gating ('unit'),\n per-memory slot gating ('memory'), or no gating at all (None).\n Defaults to `unit`.\n attention_mlp_layers: Number of layers to use in the post-attention\n MLP. 
Defaults to 2.\n key_size: Size of vector to use for key & query vectors in the attention\n computation. Defaults to None, in which case we use `head_size`.\n name: Name of the module.\n\n # NEW flag for this class\n return_all_outputs: Whether the model returns outputs for each step (like seq2seq) or only the final output.\n Raises:\n ValueError: gate_style not one of [None, 'memory', 'unit'].\n ValueError: num_blocks is < 1.\n ValueError: attention_mlp_layers is < 1.\n \"\"\"\n\n def __init__(self, mem_slots, head_size, input_size, output_size, num_heads=1, num_blocks=1, forget_bias=1., input_bias=0.,\n gate_style='unit', attention_mlp_layers=2, key_size=None, return_all_outputs=False, use_topk = False, topk = 3, num_steps = 5,\n null_attention = False):\n super(RelationalMemory, self).__init__()\n\n ########## generic parameters for RMC ##########\n self.mem_slots = mem_slots\n self.head_size = head_size\n self.num_heads = num_heads\n self.mem_size = self.head_size * self.num_heads\n self.use_topk = use_topk\n self.topk = topk\n\n # a new fixed params needed for pytorch port of RMC\n # +1 is the concatenated input per time step : we do self-attention with the concatenated memory & input\n # so if the mem_slots = 1, this value is 2\n self.mem_slots_plus_input = self.mem_slots + 1\n\n if num_blocks < 1:\n raise ValueError('num_blocks must be >=1. Got: {}.'.format(num_blocks))\n self.num_blocks = num_blocks\n\n print(\"Using gate style\", gate_style)\n if gate_style not in ['unit', 'memory', None]:\n raise ValueError(\n 'gate_style must be one of [\\'unit\\', \\'memory\\', None]. got: '\n '{}.'.format(gate_style))\n self.gate_style = gate_style\n\n if attention_mlp_layers < 1:\n raise ValueError('attention_mlp_layers must be >= 1. Got: {}.'.format(\n attention_mlp_layers))\n self.attention_mlp_layers = attention_mlp_layers\n\n self.key_size = key_size if key_size else self.head_size\n self.attn_log = None\n\n ########## parameters for multihead attention ##########\n # value_size is same as head_size\n self.value_size = self.head_size\n # total size for query-key-value\n self.qkv_size = 2 * self.key_size + self.value_size\n self.total_qkv_size = self.qkv_size * self.num_heads # denoted as F\n\n self.query_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n count_parameters(\"query\", self.query_proj)\n self.key_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n count_parameters(\"key\", self.key_proj)\n self.value_proj = nn.Linear(self.mem_size, self.value_size * self.num_heads)\n count_parameters(\"value\", self.value_proj)\n\n # each head has qkv_sized linear projector\n # just using one big param is more efficient, rather than this line\n # self.qkv_projector = [nn.Parameter(torch.randn((self.qkv_size, self.qkv_size))) for _ in range(self.num_heads)]\n #self.qkv_projector = nn.Linear(self.mem_size, self.total_qkv_size)\n #self.qkv_layernorm = nn.LayerNorm(self.total_qkv_size)\n\n # used for attend_over_memory function\n self.attention_mlp = nn.ModuleList([nn.Linear(self.mem_size, self.mem_size)] * self.attention_mlp_layers)\n count_parameters(\"attention_mlp\", self.attention_mlp[0])\n self.attended_memory_layernorm = nn.LayerNorm( self.mem_size)\n count_parameters(\"layernorm1\", self.attended_memory_layernorm)\n self.attended_memory_layernorm2 = nn.LayerNorm(self.mem_size)\n count_parameters(\"layernorm2\", self.attended_memory_layernorm2)\n\n ########## parameters for initial embedded input projection ##########\n self.input_size = input_size\n 
self.input_projector = nn.Linear(self.input_size, self.mem_size)\n count_parameters(\"input_projector\", self.input_projector)\n\n #self.output_projector = nn.Linear(self.output_size, self.input_size)\n\n ########## parameters for gating ##########\n self.num_gates = 2 * self.calculate_gate_size()\n print('input projector:'+str(self.mem_size))\n \n if gate_style in ['unit', 'memory']:\n self.input_gate_projector = RepeatLinear(self.mem_size, self.num_gates, num_steps)\n count_parameters(\"input_gate_projector\", self.input_gate_projector)\n self.memory_gate_projector = GroupLinearLayer(self.mem_size, self.num_gates, self.mem_slots)\n #self.memory_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n\n #(self.mem_size, self.num_gates, self.mem_slots)\n count_parameters(\"memory_gate_projector\", self.memory_gate_projector)\n \n # trainable scalar gate bias tensors\n self.forget_bias = nn.Parameter(torch.tensor(forget_bias, dtype=torch.float32))\n self.input_bias = nn.Parameter(torch.tensor(input_bias, dtype=torch.float32))\n\n ########## number of outputs returned #####\n self.return_all_outputs = return_all_outputs\n\n self.null_attention = null_attention\n\n print(\"relational volatie!!!\") \n #self.competition_mlp = nn.Sequential(nn.Linear(self.mem_slots * self.mem_size + self.mem_size, 256),\n # nn.ReLU(),\n # nn.Linear(256, 256),\n # nn.ReLU(),\n # nn.Linear(256, 256),\n # nn.ReLU(),\n # nn.Linear(256, 2))\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n # needed for truncated BPTT, called at every batch forward pass\n if isinstance(h, torch.Tensor):\n return h.detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initial_state(self, batch_size, trainable=False):\n \"\"\"\n Creates the initial memory.\n We should ensure each row of the memory is initialized to be unique,\n so initialize the matrix to be the identity. We then pad or truncate\n as necessary so that init_state is of size\n (batch_size, self.mem_slots, self.mem_size).\n Args:\n batch_size: The size of the batch.\n trainable: Whether the initial state is trainable. This is always True.\n Returns:\n init_state: A truncated or padded matrix of size\n (batch_size, self.mem_slots, self.mem_size).\n \"\"\"\n if True:\n init_state = torch.stack([torch.eye(self.mem_slots) for _ in range(batch_size)])\n\n # pad the matrix with zeros\n if self.mem_size > self.mem_slots:\n difference = self.mem_size - self.mem_slots\n pad = torch.zeros((batch_size, self.mem_slots, difference))\n init_state = torch.cat([init_state, pad], -1)\n\n # truncation. 
take the first 'self.mem_size' components\n elif self.mem_size < self.mem_slots:\n init_state = init_state[:, :, :self.mem_size]\n\n return init_state\n else:\n init_state = torch.randn(batch_size, self.mem_slots, self.mem_size)\n return init_state\n def multihead_attention(self, input, memory, use_topk_ = True, store_log = True):\n \"\"\"\n Perform multi-head attention from 'Attention is All You Need'.\n Implementation of the attention mechanism from\n https://arxiv.org/abs/1706.03762.\n Args:\n memory: Memory tensor to perform attention on.\n Returns:\n new_memory: New memory tensor.\n \"\"\"\n\n q = self.query_proj(memory)\n k = self.key_proj(input)\n v = self.value_proj(input)\n\n q = q.reshape(q.size(0), q.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n k = k.reshape(k.size(0), k.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n v = v.reshape(v.size(0), v.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n scores = torch.matmul(q, k.transpose(2, 3))\n\n scores = torch.softmax(scores, dim = -1)\n #if store_log:\n # self.attn_log = scores[0]\n if not self.null_attention:\n if self.use_topk and use_topk_:\n topk = torch.topk(scores, dim = -1, k = self.topk)\n mask = torch.zeros(scores.size()).to(scores.device)\n mask.scatter_(3, topk.indices, 1)\n scores = scores * mask\n else:\n memory_flat = memory.reshape(memory.size(0), -1).unsqueeze(1)\n memory_flat = memory_flat.repeat(1, input.shape[1], 1)\n\n N = torch.cat((input, memory_flat), dim = 2)\n N = self.competition_mlp(N)\n\n N = torch.nn.functional.gumbel_softmax(N, dim = 2, hard = True, tau = 0.5)\n\n N = N[:, :, 0]\n\n scores = scores * N.unsqueeze(1).unsqueeze(1)\n\n\n output = torch.matmul(scores, v)\n\n \"\"\"#print(memory.size())\n # First, a simple linear projection is used to construct queries\n qkv = self.qkv_projector(memory)\n # apply layernorm for every dim except the batch dim\n qkv = self.qkv_layernorm(qkv)\n\n # mem_slots needs to be dynamically computed since mem_slots got concatenated with inputs\n # example: self.mem_slots=10 and seq_length is 3, and then mem_slots is 10 + 1 = 11 for each 3 step forward pass\n # this is the same as self.mem_slots_plus_input, but defined to keep the sonnet implementation code style\n mem_slots = memory.shape[1] # denoted as N\n\n # split the qkv to multiple heads H\n # [B, N, F] => [B, N, H, F/H]\n qkv_reshape = qkv.view(qkv.shape[0], mem_slots, self.num_heads, self.qkv_size)\n\n # [B, N, H, F/H] => [B, H, N, F/H]\n qkv_transpose = qkv_reshape.permute(0, 2, 1, 3)\n\n # [B, H, N, key_size], [B, H, N, key_size], [B, H, N, value_size]\n q, k, v = torch.split(qkv_transpose, [self.key_size, self.key_size, self.value_size], -1)\n\n # scale q with d_k, the dimensionality of the key vectors\n q *= (self.key_size ** -0.5)\n\n # make it [B, H, N, N]\n dot_product = torch.matmul(q, k.permute(0, 1, 3, 2))\n weights = F.softmax(dot_product, dim=-1)\n\n if self.use_topk:\n topk = torch.topk(weights, dim = -1, k = self.topk)\n mask = torch.zeros(weights.size()).to(weights.device)\n mask.scatter_(3, topk.indices, 1)\n weights = weights * mask\n\n # output is [B, H, N, V]\n output = torch.matmul(weights, v)\"\"\"\n\n # [B, H, N, V] => [B, N, H, V] => [B, N, H*V]\n output_transpose = output.permute(0, 2, 1, 3).contiguous()\n new_memory = output_transpose.view((output_transpose.shape[0], output_transpose.shape[1], -1))\n\n return new_memory\n\n\n @property\n def state_size(self):\n return [self.mem_slots, self.mem_size]\n\n @property\n def output_size(self):\n return self.mem_slots * 
self.mem_size\n\n def print_log(self):\n print(self.attn_log)\n\n def calculate_gate_size(self):\n \"\"\"\n Calculate the gate size from the gate_style.\n Returns:\n The per sample, per head parameter size of each gate.\n \"\"\"\n if self.gate_style == 'unit':\n return self.mem_size\n elif self.gate_style == 'memory':\n return 1\n else: # self.gate_style == None\n return 0\n\n def create_gates(self, inputs, memory):\n \"\"\"\n Create input and forget gates for this step using `inputs` and `memory`.\n Args:\n inputs: Tensor input.\n memory: The current state of memory.\n Returns:\n input_gate: A LSTM-like insert gate.\n forget_gate: A LSTM-like forget gate.\n \"\"\"\n # We'll create the input and forget gates at once. Hence, calculate double\n # the gate size.\n\n # equation 8: since there is no output gate, h is just a tanh'ed m\n memory = torch.tanh(memory)\n\n # TODO: check this input flattening is correct\n # sonnet uses this, but i think it assumes time step of 1 for all cases\n # if inputs is (B, T, features) where T > 1, this gets incorrect\n # inputs = inputs.view(inputs.shape[0], -1)\n\n # fixed implementation\n if len(inputs.shape) == 3:\n #if inputs.shape[1] > 1:\n # raise ValueError(\n # \"input seq length is larger than 1. create_gate function is meant to be called for each step, with input seq length of 1\")\n \n # matmul for equation 4 and 5\n # there is no output gate, so equation 6 is not implemented\n #print('jello')\n gate_inputs = self.input_gate_projector(inputs)\n gate_inputs = gate_inputs.unsqueeze(dim=1)\n gate_memory = self.memory_gate_projector(memory)\n else:\n raise ValueError(\"input shape of create_gate function is 2, expects 3\")\n\n # this completes the equation 4 and 5\n #print(gate_inputs.size())\n #print(gate_memory.size())\n gates = gate_memory + gate_inputs\n #self.attn_log = gates[0]\n gates = torch.split(gates, split_size_or_sections=int(gates.shape[2] / 2), dim=2)\n input_gate, forget_gate = gates\n assert input_gate.shape[2] == forget_gate.shape[2]\n\n # to be used for equation 7\n self.attn_log = torch.zeros(input_gate.shape[1], input_gate.shape[2], 2)\n self.attn_log[:, :, 0] = input_gate[0].cpu()\n\n input_gate = torch.sigmoid(input_gate+self.input_bias)\n forget_gate = torch.sigmoid(forget_gate + self.forget_bias)\n\n return input_gate, forget_gate\n\n def attend_over_memory(self, inputs, memory):\n \"\"\"\n Perform multiheaded attention over `memory`.\n Args:\n memory: Current relational memory.\n Returns:\n The attended-over memory.\n \"\"\"\n for _ in range(self.num_blocks):\n attended_memory = self.multihead_attention(inputs, memory)\n\n # Add a skip connection to the multiheaded attention's input.\n memory = self.attended_memory_layernorm(memory + attended_memory)\n\n # add a skip connection to the attention_mlp's input.\n attention_mlp = memory\n for i, l in enumerate(self.attention_mlp):\n attention_mlp = self.attention_mlp[i](attention_mlp)\n attention_mlp = F.relu(attention_mlp)\n memory = self.attended_memory_layernorm2(memory + attention_mlp)\n #memory = self.multihead_attention(memory, memory, use_topk_ = False, store_log = False)\n\n return memory\n\n def forward_step(self, inputs, memory, treat_input_as_matrix=False):\n \"\"\"\n Forward step of the relational memory core.\n Args:\n inputs: Tensor input.\n memory: Memory output from the previous time step.\n treat_input_as_matrix: Optional, whether to treat `input` as a sequence\n of matrices. 
Default to False, in which case the input is flattened\n into a vector.\n Returns:\n output: This time step's output.\n next_memory: The next version of memory to use.\n \"\"\"\n\n if treat_input_as_matrix:\n # keep (Batch, Seq, ...) dim (0, 1), flatten starting from dim 2\n inputs = inputs.view(inputs.shape[0], inputs.shape[1], -1)\n # apply linear layer for dim 2\n inputs_reshape = self.input_projector(inputs)\n else:\n # keep (Batch, ...) dim (0), flatten starting from dim 1\n inputs = inputs.view(inputs.shape[0], -1)\n # apply linear layer for dim 1\n inputs = self.input_projector(inputs)\n # unsqueeze the time step to dim 1\n inputs_reshape = inputs.unsqueeze(dim=1)\n\n #memory_plus_input = torch.cat([memory, inputs_reshape], dim=1)\n #print(memory_plus_input.size())\n next_memory = self.attend_over_memory(inputs_reshape, memory)\n\n # cut out the concatenated input vectors from the original memory slots\n #n = inputs_reshape.shape[1]\n #next_memory = next_memory[:, :-n, :]\n\n if self.gate_style == 'unit' or self.gate_style == 'memory':\n # these gates are sigmoid-applied ones for equation 7\n input_gate, forget_gate = self.create_gates(inputs_reshape, memory)\n # equation 7 calculation\n next_memory = input_gate * torch.tanh(next_memory)\n next_memory += forget_gate * memory\n self.attn_log[:, :, 1] = input_gate[0].cpu()\n\n\n output = next_memory.reshape(next_memory.shape[0], -1)\n hx = self.multihead_attention(next_memory, inputs_reshape, use_topk_ = False, store_log = False)\n return output, next_memory, hx\n\n def forward(self, inputs, memory, parallel = True):\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n # memory = self.repackage_hidden(memory)\n\n # for loop implementation of (entire) recurrent forward pass of the model\n # inputs is batch first [batch, seq], and output logit per step is [batch, vocab]\n # so the concatenated logits are [seq * batch, vocab]\n\n # targets are flattened [seq, batch] => [seq * batch], so the dimension is correct\n\n logits = []\n #print(inputs.size())\n #print(memory.size())\n #memory = self.repackage_hidden(memory)\n # shape[1] is seq_lenth T\n if not parallel:\n for idx_step in range(inputs.shape[1]):\n logit, memory = self.forward_step(inputs[:, idx_step], memory)\n logits.append(logit)\n logits = torch.cat(logits)\n else:\n logits, memory, hx = self.forward_step(inputs, memory, treat_input_as_matrix = True)\n \n memory_out = None #self.output_projector(memory.view(memory.shape[0], -1))\n\n #print(inputs.size())\n #print(memory_out.size())\n #print('------')\n if self.return_all_outputs:\n return logits, memory_out , memory, hx\n else:\n return logits, memory_out, memory, hx" }, { "identifier": "RelationalMemory", "path": "multi_part_assembly/utils/wx_transformer_utilities/relational_memory_regressive.py", "snippet": "class RelationalMemory(nn.Module):\n \"\"\"\n Constructs a `RelationalMemory` object.\n This class is same as the RMC from relational_rnn_models.py, but without language modeling-specific variables.\n Args:\n mem_slots: The total number of memory slots to use.\n head_size: The size of an attention head.\n input_size: The size of input per step. i.e. the dimension of each input vector\n num_heads: The number of attention heads to use. Defaults to 1.\n num_blocks: Number of times to compute attention per time step. 
Defaults\n to 1.\n forget_bias: Bias to use for the forget gate, assuming we are using\n some form of gating. Defaults to 1.\n input_bias: Bias to use for the input gate, assuming we are using\n some form of gating. Defaults to 0.\n gate_style: Whether to use per-element gating ('unit'),\n per-memory slot gating ('memory'), or no gating at all (None).\n Defaults to `unit`.\n attention_mlp_layers: Number of layers to use in the post-attention\n MLP. Defaults to 2.\n key_size: Size of vector to use for key & query vectors in the attention\n computation. Defaults to None, in which case we use `head_size`.\n name: Name of the module.\n\n # NEW flag for this class\n return_all_outputs: Whether the model returns outputs for each step (like seq2seq) or only the final output.\n Raises:\n ValueError: gate_style not one of [None, 'memory', 'unit'].\n ValueError: num_blocks is < 1.\n ValueError: attention_mlp_layers is < 1.\n \"\"\"\n\n def __init__(self, mem_slots, head_size, input_size, output_size, num_heads=1, num_blocks=1, forget_bias=1., input_bias=0.,\n gate_style='unit', attention_mlp_layers=2, key_size=None, return_all_outputs=False, use_topk = False, topk = 3, num_steps = 5,\n null_attention = False):\n super(RelationalMemory, self).__init__()\n\n ########## generic parameters for RMC ##########\n self.mem_slots = mem_slots\n self.head_size = head_size\n self.num_heads = num_heads\n self.mem_size = self.head_size * self.num_heads\n self.use_topk = use_topk\n self.topk = topk\n\n # a new fixed params needed for pytorch port of RMC\n # +1 is the concatenated input per time step : we do self-attention with the concatenated memory & input\n # so if the mem_slots = 1, this value is 2\n self.mem_slots_plus_input = self.mem_slots + 1\n\n if num_blocks < 1:\n raise ValueError('num_blocks must be >=1. Got: {}.'.format(num_blocks))\n self.num_blocks = num_blocks\n\n if gate_style not in ['unit', 'memory', None]:\n raise ValueError(\n 'gate_style must be one of [\\'unit\\', \\'memory\\', None]. got: '\n '{}.'.format(gate_style))\n self.gate_style = gate_style\n\n if attention_mlp_layers < 1:\n raise ValueError('attention_mlp_layers must be >= 1. 
Got: {}.'.format(\n attention_mlp_layers))\n self.attention_mlp_layers = attention_mlp_layers\n\n self.key_size = key_size if key_size else self.head_size\n\n ########## parameters for multihead attention ##########\n # value_size is same as head_size\n self.value_size = self.head_size\n # total size for query-key-value\n self.qkv_size = 2 * self.key_size + self.value_size\n self.total_qkv_size = self.qkv_size * self.num_heads # denoted as F\n\n self.query_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.key_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.value_proj = nn.Linear(self.mem_size, self.value_size * self.num_heads)\n\n\n # each head has qkv_sized linear projector\n # just using one big param is more efficient, rather than this line\n # self.qkv_projector = [nn.Parameter(torch.randn((self.qkv_size, self.qkv_size))) for _ in range(self.num_heads)]\n self.qkv_projector = nn.Linear(self.mem_size, self.total_qkv_size)\n self.qkv_layernorm = nn.LayerNorm(self.total_qkv_size)\n\n # used for attend_over_memory function\n self.attention_mlp = nn.ModuleList([nn.Linear(self.mem_size, self.mem_size)] * self.attention_mlp_layers)\n self.attended_memory_layernorm = nn.LayerNorm( self.mem_size)\n self.attended_memory_layernorm2 = nn.LayerNorm(self.mem_size)\n\n ########## parameters for initial embedded input projection ##########\n self.input_size = input_size\n self.input_projector = nn.Linear(self.input_size, self.mem_size)\n\n self.output_projector = nn.Linear(self.output_size, self.input_size)\n\n ########## parameters for gating ##########\n self.num_gates = 2 * self.calculate_gate_size()\n print('input projector:'+str(self.mem_size))\n self.input_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n self.memory_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n # trainable scalar gate bias tensors\n self.forget_bias = nn.Parameter(torch.tensor(forget_bias, dtype=torch.float32))\n self.input_bias = nn.Parameter(torch.tensor(input_bias, dtype=torch.float32))\n\n ########## number of outputs returned #####\n self.return_all_outputs = return_all_outputs\n\n self.null_attention = null_attention\n\n self.competition_mlp = nn.Sequential(nn.Linear(self.mem_slots * self.mem_size + self.mem_size, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 2))\n self.score_log = None\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n # needed for truncated BPTT, called at every batch forward pass\n if isinstance(h, torch.Tensor):\n return h.detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initial_state(self, batch_size, ts, trainable=False):\n \"\"\"\n Creates the initial memory.\n We should ensure each row of the memory is initialized to be unique,\n so initialize the matrix to be the identity. We then pad or truncate\n as necessary so that init_state is of size\n (batch_size, self.mem_slots, self.mem_size).\n Args:\n batch_size: The size of the batch.\n trainable: Whether the initial state is trainable. 
This is always True.\n Returns:\n init_state: A truncated or padded matrix of size\n (batch_size, self.mem_slots, self.mem_size).\n \"\"\"\n init_state = torch.stack([torch.eye(self.mem_slots) for _ in range(batch_size)])\n\n # pad the matrix with zeros\n if self.mem_size > self.mem_slots:\n difference = self.mem_size - self.mem_slots\n pad = torch.zeros((batch_size, self.mem_slots, difference))\n init_state = torch.cat([init_state, pad], -1)\n\n # truncation. take the first 'self.mem_size' components\n elif self.mem_size < self.mem_slots:\n init_state = init_state[:, :, :self.mem_size]\n\n init_state = init_state.unsqueeze(1)\n init_state = init_state.repeat(1, ts, 1, 1)\n init_state = init_state.reshape(batch_size * ts, self.mem_slots, -1)\n\n return init_state\n\n def multihead_attention(self, input, memory, mask = None):\n \"\"\"\n Perform multi-head attention from 'Attention is All You Need'.\n Implementation of the attention mechanism from\n https://arxiv.org/abs/1706.03762.\n Args:\n memory: Memory tensor to perform attention on.\n Returns:\n new_memory: New memory tensor.\n \"\"\"\n\n q = self.query_proj(memory)\n k = self.key_proj(input)\n v = self.value_proj(input)\n\n q = q.reshape(q.size(0), q.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n k = k.reshape(k.size(0), k.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n v = v.reshape(v.size(0), v.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n scores = torch.matmul(q, k.transpose(2, 3))\n\n mask = mask.unsqueeze(1).unsqueeze(1)\n #print(mask.size())\n #print(scores.size())\n #scores = scores.masked_fill(mask.bool(), float('-inf'))\n scores = Identity().apply(scores)\n\n scores = torch.softmax(scores, dim = -1)\n scores = scores * mask # mask for attending to prev positions only\n self.score_log = scores\n if True:\n if self.use_topk:\n topk = torch.topk(scores, dim = -1, k = self.topk)\n topk_mask = torch.zeros(scores.size()).to(scores.device)\n topk_mask.scatter_(3, topk.indices, 1)\n scores = scores * topk_mask\n else:\n memory_flat = memory.reshape(memory.size(0), -1).unsqueeze(1)\n memory_flat = memory_flat.repeat(1, input.shape[1], 1)\n\n N = torch.cat((input, memory_flat), dim = 2)\n N = self.competition_mlp(N)\n\n N = torch.nn.functional.gumbel_softmax(N, dim = 2, hard = True, tau = 0.5)\n\n N = N[:, :, 0]\n\n scores = scores * N.unsqueeze(1).unsqueeze(1)\n\n\n output = torch.matmul(scores, v)\n\n \"\"\"#print(memory.size())\n # First, a simple linear projection is used to construct queries\n qkv = self.qkv_projector(memory)\n # apply layernorm for every dim except the batch dim\n qkv = self.qkv_layernorm(qkv)\n\n # mem_slots needs to be dynamically computed since mem_slots got concatenated with inputs\n # example: self.mem_slots=10 and seq_length is 3, and then mem_slots is 10 + 1 = 11 for each 3 step forward pass\n # this is the same as self.mem_slots_plus_input, but defined to keep the sonnet implementation code style\n mem_slots = memory.shape[1] # denoted as N\n\n # split the qkv to multiple heads H\n # [B, N, F] => [B, N, H, F/H]\n qkv_reshape = qkv.view(qkv.shape[0], mem_slots, self.num_heads, self.qkv_size)\n\n # [B, N, H, F/H] => [B, H, N, F/H]\n qkv_transpose = qkv_reshape.permute(0, 2, 1, 3)\n\n # [B, H, N, key_size], [B, H, N, key_size], [B, H, N, value_size]\n q, k, v = torch.split(qkv_transpose, [self.key_size, self.key_size, self.value_size], -1)\n\n # scale q with d_k, the dimensionality of the key vectors\n q *= (self.key_size ** -0.5)\n\n # make it [B, H, N, N]\n dot_product = torch.matmul(q, 
k.permute(0, 1, 3, 2))\n weights = F.softmax(dot_product, dim=-1)\n\n if self.use_topk:\n topk = torch.topk(weights, dim = -1, k = self.topk)\n mask = torch.zeros(weights.size()).to(weights.device)\n mask.scatter_(3, topk.indices, 1)\n weights = weights * mask\n\n # output is [B, H, N, V]\n output = torch.matmul(weights, v)\"\"\"\n\n # [B, H, N, V] => [B, N, H, V] => [B, N, H*V]\n output_transpose = output.permute(0, 2, 1, 3).contiguous()\n new_memory = output_transpose.view((output_transpose.shape[0], output_transpose.shape[1], -1))\n\n return new_memory\n\n\n @property\n def state_size(self):\n return [self.mem_slots, self.mem_size]\n\n @property\n def output_size(self):\n return self.mem_slots * self.mem_size\n\n def calculate_gate_size(self):\n \"\"\"\n Calculate the gate size from the gate_style.\n Returns:\n The per sample, per head parameter size of each gate.\n \"\"\"\n if self.gate_style == 'unit':\n return self.mem_size\n elif self.gate_style == 'memory':\n return 1\n else: # self.gate_style == None\n return 0\n\n def create_gates(self, inputs, memory):\n \"\"\"\n Create input and forget gates for this step using `inputs` and `memory`.\n Args:\n inputs: Tensor input.\n memory: The current state of memory.\n Returns:\n input_gate: A LSTM-like insert gate.\n forget_gate: A LSTM-like forget gate.\n \"\"\"\n # We'll create the input and forget gates at once. Hence, calculate double\n # the gate size.\n\n # equation 8: since there is no output gate, h is just a tanh'ed m\n memory = torch.tanh(memory)\n\n # TODO: check this input flattening is correct\n # sonnet uses this, but i think it assumes time step of 1 for all cases\n # if inputs is (B, T, features) where T > 1, this gets incorrect\n # inputs = inputs.view(inputs.shape[0], -1)\n\n # fixed implementation\n if len(inputs.shape) == 3:\n #if inputs.shape[1] > 1:\n # raise ValueError(\n # \"input seq length is larger than 1. 
create_gate function is meant to be called for each step, with input seq length of 1\")\n inputs = inputs.view(inputs.shape[0], -1)\n # matmul for equation 4 and 5\n # there is no output gate, so equation 6 is not implemented\n #print(inputs.size())\n gate_inputs = self.input_gate_projector(inputs)\n gate_inputs = gate_inputs.unsqueeze(dim=1)\n gate_memory = self.memory_gate_projector(memory)\n else:\n raise ValueError(\"input shape of create_gate function is 2, expects 3\")\n\n # this completes the equation 4 and 5\n #print(gate_inputs.size())\n #print(gate_memory.size())\n gates = gate_memory + gate_inputs\n gates = torch.split(gates, split_size_or_sections=int(gates.shape[2] / 2), dim=2)\n input_gate, forget_gate = gates\n assert input_gate.shape[2] == forget_gate.shape[2]\n\n # to be used for equation 7\n input_gate = torch.sigmoid(input_gate + self.input_bias)\n forget_gate = torch.sigmoid(forget_gate + self.forget_bias)\n\n return input_gate, forget_gate\n\n def attend_over_memory(self, inputs, memory, mask = None):\n \"\"\"\n Perform multiheaded attention over `memory`.\n Args:\n memory: Current relational memory.\n Returns:\n The attended-over memory.\n \"\"\"\n for _ in range(self.num_blocks):\n attended_memory = self.multihead_attention(inputs, memory, mask = mask)\n\n # Add a skip connection to the multiheaded attention's input.\n memory = self.attended_memory_layernorm(memory + attended_memory)\n\n # add a skip connection to the attention_mlp's input.\n attention_mlp = memory\n for i, l in enumerate(self.attention_mlp):\n attention_mlp = self.attention_mlp[i](attention_mlp)\n attention_mlp = F.relu(attention_mlp)\n memory = self.attended_memory_layernorm2(memory + attention_mlp)\n\n return memory\n\n def forward_step(self, inputs, memory, treat_input_as_matrix=False, mask = None, other_inp = None):\n \"\"\"\n Forward step of the relational memory core.\n Args:\n inputs: Tensor input.\n memory: Memory output from the previous time step.\n treat_input_as_matrix: Optional, whether to treat `input` as a sequence\n of matrices. Default to False, in which case the input is flattened\n into a vector.\n Returns:\n output: This time step's output.\n next_memory: The next version of memory to use.\n \"\"\"\n\n if treat_input_as_matrix:\n # keep (Batch, Seq, ...) dim (0, 1), flatten starting from dim 2\n inputs = inputs.view(inputs.shape[0], inputs.shape[1], -1)\n #print(inputs.size())\n # apply linear layer for dim 2\n inputs_reshape = self.input_projector(inputs)\n #print(inputs_reshape.size())\n else:\n # keep (Batch, ...) 
dim (0), flatten starting from dim 1\n inputs = inputs.view(inputs.shape[0], -1)\n # apply linear layer for dim 1\n inputs = self.input_projector(inputs)\n # unsqueeze the time step to dim 1\n inputs_reshape = inputs.unsqueeze(dim=1)\n\n #memory_plus_input = torch.cat([memory, inputs_reshape], dim=1)\n #print(memory_plus_input.size())\n next_memory = self.attend_over_memory(inputs_reshape, memory, mask = mask)\n\n #print(next_memory.size())\n #print(inputs_reshape.size())\n\n # cut out the concatenated input vectors from the original memory slots\n #n = inputs_reshape.shape[1]\n #next_memory = next_memory[:, :-n, :]\n\n if self.gate_style == 'unit' or self.gate_style == 'memory':\n # these gates are sigmoid-applied ones for equation 7\n input_gate, forget_gate = self.create_gates(other_inp.unsqueeze(1), memory)\n # equation 7 calculation\n next_memory = input_gate * torch.tanh(next_memory)\n next_memory += forget_gate * memory\n\n\n output = next_memory.view(next_memory.shape[0], -1)\n return output, next_memory\n\n # relational memory这里是不是\n def forward(self, inputs, memory):\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n # memory = self.repackage_hidden(memory)\n\n # for loop implementation of (entire) recurrent forward pass of the model\n # inputs is batch first [batch, seq], and output logit per step is [batch, vocab]\n # so the concatenated logits are [seq * batch, vocab]\n\n # targets are flattened [seq, batch] => [seq * batch], so the dimension is correct\n\n B, T, D = inputs.size()\n mask = torch.ones(inputs.size(1), inputs.size(1)).to(inputs.device)\n mask = torch.tril(mask)\n mask = mask.unsqueeze(0)\n mask = mask.repeat(inputs.size(0), 1, 1)\n\n mask = mask.reshape(mask.size(0) * mask.size(1), -1)\n\n inputs_ = inputs.unsqueeze(2)\n inputs_ = inputs_.repeat(1, 1, inputs.size(1), 1)\n inputs_ = inputs_.reshape(B * T, T, -1)\n\n\n logits = []\n #print(inputs.size())\n #print(memory.size())\n #memory = self.repackage_hidden(memory)\n # shape[1] is seq_lenth T\n #if not parallel:\n # for idx_step in range(inputs.shape[1]):\n # logit, memory = self.forward_step(inputs[:, idx_step], memory)\n # logits.append(logit)\n # logits = torch.cat(logits)\n #else:\n logits, memory = self.forward_step(inputs_, memory, treat_input_as_matrix = True, mask = mask, other_inp = inputs.reshape(B * T, -1))\n \n memory_out = self.output_projector(memory.view(memory.shape[0], -1))\n\n #print(inputs.size())\n #print(memory_out.size())\n #print('------')\n if self.return_all_outputs:\n return logits, memory_out , memory\n else:\n return logits, memory_out, memory\n\n def print_log(self):\n print(self.score_log[25])" } ]
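Both RelationalMemory variants in the context above sparsify their attention maps in the same way: after the softmax, only the top-k scores per query are kept (torch.topk followed by a scatter_ mask) and the rest are zeroed. Below is a minimal, self-contained sketch of just that masking step; the tensor shapes and the value of k are illustrative assumptions, not values taken from this record.

import torch

def topk_attention_scores(scores: torch.Tensor, k: int) -> torch.Tensor:
    # scores: softmax-normalised attention weights of shape (batch, heads, queries, keys),
    # mirroring the layout used in the RelationalMemory snippets above.
    topk = torch.topk(scores, k=k, dim=-1)
    mask = torch.zeros_like(scores)
    mask.scatter_(-1, topk.indices, 1.0)   # mark the k largest positions per query
    return scores * mask                   # zero out everything outside the top-k

# Illustrative shapes only.
scores = torch.softmax(torch.randn(2, 4, 6, 6), dim=-1)
sparse = topk_attention_scores(scores, k=3)
print((sparse > 0).sum(dim=-1))            # at most 3 surviving keys per query

As in the snippets, the surviving weights are not renormalised after masking; the masked scores are used directly in the matmul with the values.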
import math import time import numpy as np import torch import torch.nn.functional as F import multi_part_assembly.utils.wx_transformer_utilities.fairseq_utils as utils from typing import Dict, Optional, Tuple from torch import Tensor, nn from torch.nn import Parameter from .fairseq_dropout import FairseqDropout from .attention_rim import MultiHeadAttention as MHAMemory from .quant_noise import quant_noise from .group_linear_layer import GroupLinearLayer from .relational_memory_volatile import RelationalMemory from .relational_memory_regressive import RelationalMemory as RelationalMemoryRegressive
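One behaviour of the imported FairseqDropout is worth flagging: it is a no-op outside training unless make_generation_fast_ re-enables it for named modules. A short usage sketch, assuming the multi_part_assembly package above is importable; the module names passed here are placeholders.

import torch
from multi_part_assembly.utils.wx_transformer_utilities.fairseq_dropout import FairseqDropout

drop = FairseqDropout(p=0.5, module_name="MultiheadAttention")
drop.eval()
x = torch.ones(8)
print(drop(x))   # unchanged: dropout is skipped outside training

# Opt this module back into dropout at inference time.
drop.make_generation_fast_("demo", retain_dropout=True,
                           retain_dropout_modules=["MultiheadAttention"])
print(drop(x))   # now stochastically zeroed and rescaled even though the module is in eval mode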
14454
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #import models.fairseq_util #from fairseq.incremental_decoding_utils import with_incremental_state #from .relational_memory_lstm import RelationalMemory # 为什么作者没有从这两个类别中引入relmem? #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer as GroupLinearLayer class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, nblocks=1, top_k_ratio=None, use_value_competition=True, shared_memory_attention = False, use_topk = False, topk = 3, num_steps = 5, mem_slots = 4, null_attention = False, regressive = False ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads self.shared_memory_attention = shared_memory_attention print('total heads', self.num_heads) print('head dim', self.head_dim) self.use_topk = use_topk self.topk = topk print('use topk?' + str(self.use_topk)) print('topk:'+str(self.topk)) assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) if not self.shared_memory_attention: # 这里的共享memory_attention是什么内容呢?表示的是不在不同的layer之间共享memory吗? self.k_proj = quant_noise(GroupLinearLayer(self.kdim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size) self.v_proj = quant_noise(GroupLinearLayer(self.vdim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size) self.q_proj = quant_noise(GroupLinearLayer(embed_dim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size) self.out_proj = quant_noise(GroupLinearLayer(embed_dim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) if self.shared_memory_attention: self.bias_k_memory = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v_memory = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.bias_k_memory = self.bias_v_memory = None self.add_zero_attn = add_zero_attn self.reset_parameters() self.onnx_trace = False self.tpu = False # 这里表示,如果共享memory_attention的话 if self.shared_memory_attention: print('MEM SLOTS:' + str(mem_slots)) print('Null attention:' + str(null_attention)) print('USING SHARED MEMORY ATTENTION +++++++++') #self.num_heads = 1 self.regressive = regressive if not regressive:
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #import models.fairseq_util #from fairseq.incremental_decoding_utils import with_incremental_state #from .relational_memory_lstm import RelationalMemory # 为什么作者没有从这两个类别中引入relmem? #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer as GroupLinearLayer class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, nblocks=1, top_k_ratio=None, use_value_competition=True, shared_memory_attention = False, use_topk = False, topk = 3, num_steps = 5, mem_slots = 4, null_attention = False, regressive = False ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads self.shared_memory_attention = shared_memory_attention print('total heads', self.num_heads) print('head dim', self.head_dim) self.use_topk = use_topk self.topk = topk print('use topk?' + str(self.use_topk)) print('topk:'+str(self.topk)) assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) if not self.shared_memory_attention: # 这里的共享memory_attention是什么内容呢?表示的是不在不同的layer之间共享memory吗? self.k_proj = quant_noise(GroupLinearLayer(self.kdim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size) self.v_proj = quant_noise(GroupLinearLayer(self.vdim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size) self.q_proj = quant_noise(GroupLinearLayer(embed_dim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size) self.out_proj = quant_noise(GroupLinearLayer(embed_dim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) if self.shared_memory_attention: self.bias_k_memory = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v_memory = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.bias_k_memory = self.bias_v_memory = None self.add_zero_attn = add_zero_attn self.reset_parameters() self.onnx_trace = False self.tpu = False # 这里表示,如果共享memory_attention的话 if self.shared_memory_attention: print('MEM SLOTS:' + str(mem_slots)) print('Null attention:' + str(null_attention)) print('USING SHARED MEMORY ATTENTION +++++++++') #self.num_heads = 1 self.regressive = regressive if not regressive:
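In the non-shared-memory branch above, each of the k/v/q/out projections is a GroupLinearLayer wrapped in quant_noise. A rough usage sketch of that pairing follows; embed_dim, nblocks and the (time, batch, feature) input shape are illustrative assumptions, not the values this model is built with. Note that quant_noise returns the module untouched whenever p <= 0, which is why wrapping something other than nn.Linear, nn.Embedding or nn.Conv2d is harmless in that case.

import torch
from multi_part_assembly.utils.wx_transformer_utilities.group_linear_layer import GroupLinearLayer
from multi_part_assembly.utils.wx_transformer_utilities.quant_noise import quant_noise

embed_dim, nblocks = 512, 4                    # placeholder hyper-parameters
proj = quant_noise(GroupLinearLayer(embed_dim // nblocks, embed_dim // nblocks, nblocks),
                   p=0.0, block_size=8)        # p=0.0: no quantization-noise hook is registered

x = torch.randn(10, 2, embed_dim)              # (time, batch, blocks * per-block features)
print(proj(x).shape)                           # torch.Size([10, 2, 512])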
self.relational_memory = RelationalMemory(
5
2023-12-15 13:13:01+00:00
24k
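The record's next_line continues the non-regressive branch with a RelationalMemory(...) call. Purely to illustrate that constructor's signature, as given in the relational_memory_volatile snippet of the context, a call could look as follows; every argument value here is a made-up placeholder, not what the repository actually passes.

from multi_part_assembly.utils.wx_transformer_utilities.relational_memory_volatile import RelationalMemory

# Hypothetical arguments; the real MultiheadAttention derives these from its own hyper-parameters.
relational_memory = RelationalMemory(
    mem_slots=4,
    head_size=128,
    input_size=512,
    output_size=512,
    num_heads=4,
    num_blocks=1,
    gate_style='unit',
    attention_mlp_layers=2,
    return_all_outputs=True,
    use_topk=False,
    topk=3,
    num_steps=5,
    null_attention=False,
)
memory = relational_memory.initial_state(batch_size=2)   # (2, mem_slots, head_size * num_heads)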